1
/* tslint:disable */
/* eslint-disable */
/**
 * OpenAI API
 * APIs for sampling from and fine-tuning language models
 *
 *
 *
 * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
 * https://openapi-generator.tech
 * Do not edit the class manually.
 */

import type { Configuration } from './configuration';
import type { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios';
import globalAxios from 'axios';
// Some imports not used depending on template conditions
// @ts-ignore
import { DUMMY_BASE_URL, assertParamExists, setApiKeyToObject, setBasicAuthToObject, setBearerAuthToObject, setOAuthToObject, setSearchParams, serializeDataIfNeeded, toPathString, createRequestFunction } from './common';
// @ts-ignore
import { BASE_PATH, COLLECTION_FORMATS, BaseAPI, RequiredError } from './base';
25
26
/**
27
*
28
* @export
29
* @interface ChatCompletionRequestMessage
30
*/
31
export interface ChatCompletionRequestMessage {
32
/**
33
* The role of the author of this message.
34
* @type {string}
35
* @memberof ChatCompletionRequestMessage
36
*/
37
'role': ChatCompletionRequestMessageRoleEnum;
38
/**
39
* The contents of the message
40
* @type {string}
41
* @memberof ChatCompletionRequestMessage
42
*/
43
'content': string;
44
/**
45
* The name of the user in a multi-user chat
46
* @type {string}
47
* @memberof ChatCompletionRequestMessage
48
*/
49
'name'?: string;
50
}
51
52
export const ChatCompletionRequestMessageRoleEnum = {
53
System: 'system',
54
User: 'user',
55
Assistant: 'assistant'
56
} as const;
57
58
export type ChatCompletionRequestMessageRoleEnum = typeof ChatCompletionRequestMessageRoleEnum[keyof typeof ChatCompletionRequestMessageRoleEnum];
59
60
/**
61
*
62
* @export
63
* @interface ChatCompletionResponseMessage
64
*/
65
export interface ChatCompletionResponseMessage {
66
/**
67
* The role of the author of this message.
68
* @type {string}
69
* @memberof ChatCompletionResponseMessage
70
*/
71
'role': ChatCompletionResponseMessageRoleEnum;
72
/**
73
* The contents of the message
74
* @type {string}
75
* @memberof ChatCompletionResponseMessage
76
*/
77
'content': string;
78
}
79
80
export const ChatCompletionResponseMessageRoleEnum = {
81
System: 'system',
82
User: 'user',
83
Assistant: 'assistant'
84
} as const;
85
86
export type ChatCompletionResponseMessageRoleEnum = typeof ChatCompletionResponseMessageRoleEnum[keyof typeof ChatCompletionResponseMessageRoleEnum];
87
88
/**
89
*
90
* @export
91
* @interface CreateAnswerRequest
92
*/
93
export interface CreateAnswerRequest {
94
/**
95
* ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
100
/**
101
* Question to get answered.
102
* @type {string}
103
* @memberof CreateAnswerRequest
104
*/
106
/**
107
* List of (question, answer) pairs that will help steer the model towards the tone and answer format you\'d like. We recommend adding 2 to 3 examples.
108
* @type {Array<any>}
109
* @memberof CreateAnswerRequest
110
*/
112
/**
113
* A text snippet containing the contextual information used to generate the answers for the `examples` you provide.
114
* @type {string}
115
* @memberof CreateAnswerRequest
116
*/
118
/**
119
* List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. You should specify either `documents` or a `file`, but not both.
120
* @type {Array<string>}
121
* @memberof CreateAnswerRequest
122
*/
123
'documents'?: Array<string> | null;
124
/**
125
* The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `documents` or a `file`, but not both.
126
* @type {string}
127
* @memberof CreateAnswerRequest
128
*/
129
'file'?: string | null;
130
/**
131
* ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
132
* @type {string}
133
* @memberof CreateAnswerRequest
134
*/
135
'search_model'?: string | null;
136
/**
137
* The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost.
138
* @type {number}
139
* @memberof CreateAnswerRequest
140
*/
141
'max_rerank'?: number | null;
142
/**
143
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
144
* @type {number}
145
* @memberof CreateAnswerRequest
146
*/
147
'temperature'?: number | null;
148
/**
149
* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
150
* @type {number}
151
* @memberof CreateAnswerRequest
152
*/
153
'logprobs'?: number | null;
154
/**
155
* The maximum number of tokens allowed for the generated answer
156
* @type {number}
157
* @memberof CreateAnswerRequest
158
*/
159
'max_tokens'?: number | null;
160
/**
166
/**
167
* How many answers to generate for each question.
168
* @type {number}
169
* @memberof CreateAnswerRequest
170
*/
171
'n'?: number | null;
172
/**
173
* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
174
* @type {object}
175
* @memberof CreateAnswerRequest
176
*/
177
'logit_bias'?: object | null;
178
/**
179
* A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
180
* @type {boolean}
181
* @memberof CreateAnswerRequest
182
*/
183
'return_metadata'?: boolean | null;
184
/**
185
* If set to `true`, the returned JSON will include a \"prompt\" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes.
186
* @type {boolean}
187
* @memberof CreateAnswerRequest
188
*/
189
'return_prompt'?: boolean | null;
190
/**
191
* If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion.
192
* @type {Array<any>}
193
* @memberof CreateAnswerRequest
194
*/
195
'expand'?: Array<any> | null;
197
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
198
* @type {string}
199
* @memberof CreateAnswerRequest
200
*/
201
'user'?: string;
203
/**
204
* @type CreateAnswerRequestStop
205
* Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
206
* @export
207
*/
208
export type CreateAnswerRequestStop = Array<string> | string;
209
210
/**
211
*
212
* @export
213
* @interface CreateAnswerResponse
214
*/
215
export interface CreateAnswerResponse {
216
/**
217
*
218
* @type {string}
219
* @memberof CreateAnswerResponse
220
*/
221
'object'?: string;
222
/**
223
*
224
* @type {string}
225
* @memberof CreateAnswerResponse
226
*/
227
'model'?: string;
228
/**
229
*
230
* @type {string}
231
* @memberof CreateAnswerResponse
232
*/
233
'search_model'?: string;
234
/**
235
*
236
* @type {string}
237
* @memberof CreateAnswerResponse
238
*/
239
'completion'?: string;
240
/**
241
*
242
* @type {Array<string>}
243
* @memberof CreateAnswerResponse
244
*/
245
'answers'?: Array<string>;
246
/**
247
*
272
/**
 *
 * @export
 * @interface CreateChatCompletionRequest
 */
export interface CreateChatCompletionRequest {
    /**
     * ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
     * @type {string}
     * @memberof CreateChatCompletionRequest
     */
    'model': string;
    /**
     * The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
     * @type {Array<ChatCompletionRequestMessage>}
     * @memberof CreateChatCompletionRequest
     */
    'messages': Array<ChatCompletionRequestMessage>;
    /**
     * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    'temperature'?: number | null;
    /**
     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    'top_p'?: number | null;
    /**
     * How many chat completion choices to generate for each input message.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    'n'?: number | null;
    /**
     * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
     * @type {boolean}
     * @memberof CreateChatCompletionRequest
     */
    'stream'?: boolean | null;
    /**
     *
     * @type {CreateChatCompletionRequestStop}
     * @memberof CreateChatCompletionRequest
     */
    'stop'?: CreateChatCompletionRequestStop;
    /**
     * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    'max_tokens'?: number;
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    'presence_penalty'?: number | null;
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    'frequency_penalty'?: number | null;
    /**
     * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
     * @type {object}
     * @memberof CreateChatCompletionRequest
     */
    'logit_bias'?: object | null;
    /**
     * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
     * @type {string}
     * @memberof CreateChatCompletionRequest
     */
    'user'?: string;
}
/**
 * @type CreateChatCompletionRequestStop
 * Up to 4 sequences where the API will stop generating further tokens.
 * @export
 */
export type CreateChatCompletionRequestStop = Array<string> | string;
357
358
/**
 *
 * @export
 * @interface CreateChatCompletionResponse
 */
export interface CreateChatCompletionResponse {
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    'id': string;
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    'object': string;
    /**
     *
     * @type {number}
     * @memberof CreateChatCompletionResponse
     */
    'created': number;
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    'model': string;
    /**
     *
     * @type {Array<CreateChatCompletionResponseChoicesInner>}
     * @memberof CreateChatCompletionResponse
     */
    'choices': Array<CreateChatCompletionResponseChoicesInner>;
    /**
     *
     * @type {CreateCompletionResponseUsage}
     * @memberof CreateChatCompletionResponse
     */
    'usage'?: CreateCompletionResponseUsage;
}
401
/**
 *
 * @export
 * @interface CreateChatCompletionResponseChoicesInner
 */
export interface CreateChatCompletionResponseChoicesInner {
    /**
     *
     * @type {number}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    'index'?: number;
    /**
     *
     * @type {ChatCompletionResponseMessage}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    'message'?: ChatCompletionResponseMessage;
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    'finish_reason'?: string;
}
426
/**
427
*
428
* @export
429
* @interface CreateClassificationRequest
430
*/
431
export interface CreateClassificationRequest {
432
/**
433
* ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
438
/**
439
* Query to be classified.
440
* @type {string}
441
* @memberof CreateClassificationRequest
442
*/
444
/**
445
* A list of examples with labels, in the following format: `[[\"The movie is so interesting.\", \"Positive\"], [\"It is quite boring.\", \"Negative\"], ...]` All the label strings will be normalized to be capitalized. You should specify either `examples` or `file`, but not both.
446
* @type {Array<any>}
447
* @memberof CreateClassificationRequest
448
*/
449
'examples'?: Array<any> | null;
450
/**
451
* The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `examples` or `file`, but not both.
452
* @type {string}
453
* @memberof CreateClassificationRequest
454
*/
455
'file'?: string | null;
456
/**
457
* The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized.
458
* @type {Array<string>}
459
* @memberof CreateClassificationRequest
460
*/
461
'labels'?: Array<string> | null;
462
/**
463
* ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
464
* @type {string}
465
* @memberof CreateClassificationRequest
466
*/
467
'search_model'?: string | null;
468
/**
469
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
470
* @type {number}
471
* @memberof CreateClassificationRequest
472
*/
473
'temperature'?: number | null;
474
/**
475
* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
476
* @type {number}
477
* @memberof CreateClassificationRequest
478
*/
479
'logprobs'?: number | null;
480
/**
481
* The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost.
482
* @type {number}
483
* @memberof CreateClassificationRequest
484
*/
485
'max_examples'?: number | null;
486
/**
487
* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
488
* @type {object}
489
* @memberof CreateClassificationRequest
490
*/
491
'logit_bias'?: object | null;
492
/**
493
* If set to `true`, the returned JSON will include a \"prompt\" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes.
494
* @type {boolean}
495
* @memberof CreateClassificationRequest
496
*/
497
'return_prompt'?: boolean | null;
498
/**
499
* A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
500
* @type {boolean}
501
* @memberof CreateClassificationRequest
502
*/
503
'return_metadata'?: boolean | null;
504
/**
505
* If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion.
506
* @type {Array<any>}
507
* @memberof CreateClassificationRequest
508
*/
509
'expand'?: Array<any> | null;
511
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
512
* @type {string}
513
* @memberof CreateClassificationRequest
514
*/
515
'user'?: string;
516
}
517
/**
518
*
519
* @export
520
* @interface CreateClassificationResponse
521
*/
522
export interface CreateClassificationResponse {
523
/**
524
*
525
* @type {string}
526
* @memberof CreateClassificationResponse
527
*/
528
'object'?: string;
529
/**
530
*
531
* @type {string}
532
* @memberof CreateClassificationResponse
533
*/
534
'model'?: string;
535
/**
536
*
537
* @type {string}
538
* @memberof CreateClassificationResponse
539
*/
540
'search_model'?: string;
541
/**
542
*
543
* @type {string}
544
* @memberof CreateClassificationResponse
545
*/
546
'completion'?: string;
547
/**
548
*
549
* @type {string}
550
* @memberof CreateClassificationResponse
551
*/
552
'label'?: string;
553
/**
554
*
558
'selected_examples'?: Array<CreateClassificationResponseSelectedExamplesInner>;
592
* ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
604
* The suffix that comes after a completion of inserted text.
605
* @type {string}
609
/**
610
* The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
611
* @type {number}
612
* @memberof CreateCompletionRequest
613
*/
614
'max_tokens'?: number | null;
615
/**
616
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
617
* @type {number}
618
* @memberof CreateCompletionRequest
619
*/
620
'temperature'?: number | null;
621
/**
622
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
623
* @type {number}
624
* @memberof CreateCompletionRequest
625
*/
626
'top_p'?: number | null;
627
/**
628
* How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
629
* @type {number}
630
* @memberof CreateCompletionRequest
631
*/
632
'n'?: number | null;
633
/**
634
* Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
635
* @type {boolean}
636
* @memberof CreateCompletionRequest
637
*/
638
'stream'?: boolean | null;
639
/**
640
* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.
641
* @type {number}
642
* @memberof CreateCompletionRequest
643
*/
644
'logprobs'?: number | null;
645
/**
646
* Echo back the prompt in addition to the completion
647
* @type {boolean}
648
* @memberof CreateCompletionRequest
649
*/
650
'echo'?: boolean | null;
651
/**
657
/**
658
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
659
* @type {number}
660
* @memberof CreateCompletionRequest
661
*/
662
'presence_penalty'?: number | null;
663
/**
664
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
665
* @type {number}
666
* @memberof CreateCompletionRequest
667
*/
668
'frequency_penalty'?: number | null;
669
/**
670
* Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
671
* @type {number}
672
* @memberof CreateCompletionRequest
673
*/
674
'best_of'?: number | null;
675
/**
676
* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
677
* @type {object}
678
* @memberof CreateCompletionRequest
679
*/
680
'logit_bias'?: object | null;
682
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
683
* @type {string}
684
* @memberof CreateCompletionRequest
685
*/
686
'user'?: string;
688
/**
 * @type CreateCompletionRequestPrompt
 * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
 * @export
 */
export type CreateCompletionRequestPrompt = Array<any> | Array<number> | Array<string> | string;
694
695
/**
 * @type CreateCompletionRequestStop
 * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
 * @export
 */
export type CreateCompletionRequestStop = Array<string> | string;
701
702
/**
703
*
704
* @export
705
* @interface CreateCompletionResponse
706
*/
707
export interface CreateCompletionResponse {
708
/**
709
*
710
* @type {string}
711
* @memberof CreateCompletionResponse
712
*/
714
/**
715
*
716
* @type {string}
717
* @memberof CreateCompletionResponse
718
*/
720
/**
721
*
722
* @type {number}
723
* @memberof CreateCompletionResponse
724
*/
726
/**
727
*
728
* @type {string}
729
* @memberof CreateCompletionResponse
730
*/
737
'choices': Array<CreateCompletionResponseChoicesInner>;
738
/**
739
*
740
* @type {CreateCompletionResponseUsage}
741
* @memberof CreateCompletionResponse
742
*/
743
'usage'?: CreateCompletionResponseUsage;
765
* @type {CreateCompletionResponseChoicesInnerLogprobs}
766
* @memberof CreateCompletionResponseChoicesInner
786
*/
787
'tokens'?: Array<string>;
788
/**
789
*
790
* @type {Array<number>}
792
*/
793
'token_logprobs'?: Array<number>;
794
/**
795
*
796
* @type {Array<object>}
798
*/
799
'top_logprobs'?: Array<object>;
800
/**
801
*
802
* @type {Array<number>}
807
/**
 *
 * @export
 * @interface CreateCompletionResponseUsage
 */
export interface CreateCompletionResponseUsage {
    /**
     *
     * @type {number}
     * @memberof CreateCompletionResponseUsage
     */
    'prompt_tokens': number;
    /**
     *
     * @type {number}
     * @memberof CreateCompletionResponseUsage
     */
    'completion_tokens': number;
    /**
     *
     * @type {number}
     * @memberof CreateCompletionResponseUsage
     */
    'total_tokens': number;
}
832
/**
833
*
834
* @export
835
* @interface CreateEditRequest
836
*/
837
export interface CreateEditRequest {
839
* ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint.
840
* @type {string}
841
* @memberof CreateEditRequest
842
*/
843
'model': string;
844
/**
845
* The input text to use as a starting point for the edit.
846
* @type {string}
847
* @memberof CreateEditRequest
848
*/
849
'input'?: string | null;
850
/**
851
* The instruction that tells the model how to edit the prompt.
852
* @type {string}
853
* @memberof CreateEditRequest
854
*/
855
'instruction': string;
856
/**
857
* How many edits to generate for the input and instruction.
858
* @type {number}
859
* @memberof CreateEditRequest
860
*/
861
'n'?: number | null;
863
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
864
* @type {number}
865
* @memberof CreateEditRequest
866
*/
867
'temperature'?: number | null;
868
/**
869
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
870
* @type {number}
871
* @memberof CreateEditRequest
872
*/
873
'top_p'?: number | null;
874
}
875
/**
876
*
877
* @export
878
* @interface CreateEditResponse
879
*/
880
export interface CreateEditResponse {
881
/**
882
*
883
* @type {string}
884
* @memberof CreateEditResponse
885
*/
898
'choices': Array<CreateCompletionResponseChoicesInner>;
899
/**
900
*
901
* @type {CreateCompletionResponseUsage}
902
* @memberof CreateEditResponse
903
*/
904
'usage': CreateCompletionResponseUsage;
906
/**
907
*
908
* @export
909
* @interface CreateEmbeddingRequest
910
*/
911
export interface CreateEmbeddingRequest {
912
/**
913
* ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
914
* @type {string}
915
* @memberof CreateEmbeddingRequest
916
*/
917
'model': string;
918
/**
919
*
920
* @type {CreateEmbeddingRequestInput}
925
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
926
* @type {string}
927
* @memberof CreateEmbeddingRequest
928
*/
929
'user'?: string;
933
* Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length.
934
* @export
935
*/
936
export type CreateEmbeddingRequestInput = Array<any> | Array<number> | Array<string> | string;
937
938
/**
939
*
940
* @export
941
* @interface CreateEmbeddingResponse
942
*/
943
export interface CreateEmbeddingResponse {
944
/**
945
*
946
* @type {string}
947
* @memberof CreateEmbeddingResponse
948
*/
950
/**
951
*
952
* @type {string}
953
* @memberof CreateEmbeddingResponse
954
*/
961
'data': Array<CreateEmbeddingResponseDataInner>;
962
/**
963
*
964
* @type {CreateEmbeddingResponseUsage}
965
* @memberof CreateEmbeddingResponse
966
*/
967
'usage': CreateEmbeddingResponseUsage;
992
'embedding': Array<number>;
993
}
994
/**
 *
 * @export
 * @interface CreateEmbeddingResponseUsage
 */
export interface CreateEmbeddingResponseUsage {
    /**
     *
     * @type {number}
     * @memberof CreateEmbeddingResponseUsage
     */
    'prompt_tokens': number;
    /**
     *
     * @type {number}
     * @memberof CreateEmbeddingResponseUsage
     */
    'total_tokens': number;
}
1013
/**
1014
*
1015
* @export
1016
* @interface CreateFineTuneRequest
1017
*/
1018
export interface CreateFineTuneRequest {
1019
/**
1020
* The ID of an uploaded file that contains training data. See [upload file](/docs/api-reference/files/upload) for how to upload a file. Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
1021
* @type {string}
1022
* @memberof CreateFineTuneRequest
1023
*/
1025
/**
1026
* The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). Your train and validation data should be mutually exclusive. Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
1027
* @type {string}
1028
* @memberof CreateFineTuneRequest
1029
*/
1030
'validation_file'?: string | null;
1031
/**
1032
* The name of the base model to fine-tune. You can select one of \"ada\", \"babbage\", \"curie\", \"davinci\", or a fine-tuned model created after 2022-04-21. To learn more about these models, see the [Models](https://platform.openai.com/docs/models) documentation.
1033
* @type {string}
1034
* @memberof CreateFineTuneRequest
1035
*/
1036
'model'?: string | null;
1037
/**
1038
* The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
1039
* @type {number}
1040
* @memberof CreateFineTuneRequest
1041
*/
1042
'n_epochs'?: number | null;
1043
/**
1044
* The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass. By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set, capped at 256 - in general, we\'ve found that larger batch sizes tend to work better for larger datasets.
1045
* @type {number}
1046
* @memberof CreateFineTuneRequest
1047
*/
1048
'batch_size'?: number | null;
1049
/**
1050
* The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value. By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final `batch_size` (larger learning rates tend to perform better with larger batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results.
1051
* @type {number}
1052
* @memberof CreateFineTuneRequest
1053
*/
1054
'learning_rate_multiplier'?: number | null;
1055
/**
1056
* The weight to use for loss on the prompt tokens. This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), and can add a stabilizing effect to training when completions are short. If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt.
1057
* @type {number}
1058
* @memberof CreateFineTuneRequest
1059
*/
1060
'prompt_loss_weight'?: number | null;
1061
/**
1062
* If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch. These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). In order to compute classification metrics, you must provide a `validation_file`. Additionally, you must specify `classification_n_classes` for multiclass classification or `classification_positive_class` for binary classification.
1063
* @type {boolean}
1064
* @memberof CreateFineTuneRequest
1065
*/
1066
'compute_classification_metrics'?: boolean | null;
1067
/**
1068
* The number of classes in a classification task. This parameter is required for multiclass classification.
1069
* @type {number}
1070
* @memberof CreateFineTuneRequest
1071
*/
1072
'classification_n_classes'?: number | null;
1073
/**
1074
* The positive class in binary classification. This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification.
1075
* @type {string}
1076
* @memberof CreateFineTuneRequest
1077
*/
1078
'classification_positive_class'?: string | null;
1079
/**
1080
* If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score is a generalization of F-1 score. This is only used for binary classification. With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall.
1081
* @type {Array<number>}
1082
* @memberof CreateFineTuneRequest
1083
*/
1084
'classification_betas'?: Array<number> | null;
1085
/**
1086
* A string of up to 40 characters that will be added to your fine-tuned model name. For example, a `suffix` of \"custom-model-name\" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
1087
* @type {string}
1088
* @memberof CreateFineTuneRequest
1089
*/
1090
'suffix'?: string | null;
1092
/**
 * Request body for image generation.
 * @export
 * @interface CreateImageRequest
 */
export interface CreateImageRequest {
    /**
     * A text description of the desired image(s). The maximum length is 1000 characters.
     * @type {string}
     * @memberof CreateImageRequest
     */
    'prompt': string;
    /**
     * The number of images to generate. Must be between 1 and 10.
     * @type {number}
     * @memberof CreateImageRequest
     */
    'n'?: number | null;
    /**
     * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
     * @type {string}
     * @memberof CreateImageRequest
     */
    'size'?: CreateImageRequestSizeEnum;
    /**
     * The format in which the generated images are returned. Must be one of `url` or `b64_json`.
     * @type {string}
     * @memberof CreateImageRequest
     */
    'response_format'?: CreateImageRequestResponseFormatEnum;
    /**
     * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
     * @type {string}
     * @memberof CreateImageRequest
     */
    'user'?: string;
}
1129
1130
// Allowed values for CreateImageRequest['size'] (const object + derived literal-union type).
export const CreateImageRequestSizeEnum = {
    _256x256: '256x256',
    _512x512: '512x512',
    _1024x1024: '1024x1024'
} as const;

export type CreateImageRequestSizeEnum = typeof CreateImageRequestSizeEnum[keyof typeof CreateImageRequestSizeEnum];
// Allowed values for CreateImageRequest['response_format'].
export const CreateImageRequestResponseFormatEnum = {
    Url: 'url',
    B64Json: 'b64_json'
} as const;

export type CreateImageRequestResponseFormatEnum = typeof CreateImageRequestResponseFormatEnum[keyof typeof CreateImageRequestResponseFormatEnum];
1143
1144
/**
 * Request body for content moderation.
 * @export
 * @interface CreateModerationRequest
 */
export interface CreateModerationRequest {
    /**
     * The input text to classify — a single string or an array of strings (see CreateModerationRequestInput).
     * @type {CreateModerationRequestInput}
     * @memberof CreateModerationRequest
     */
    'input': CreateModerationRequestInput;
    /**
     * Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
     * @type {string}
     * @memberof CreateModerationRequest
     */
    'model'?: string;
}
1163
/**
 * @type CreateModerationRequestInput
 * The input text to classify
 * @export
 */
export type CreateModerationRequestInput = Array<string> | string;
1169
1170
/**
 * Response returned by the content-moderation endpoint.
 * @export
 * @interface CreateModerationResponse
 */
export interface CreateModerationResponse {
    /**
     * Identifier of this moderation response.
     * @type {string}
     * @memberof CreateModerationResponse
     */
    'id': string;
    /**
     * The moderation model that produced the results.
     * @type {string}
     * @memberof CreateModerationResponse
     */
    'model': string;
    /**
     * Moderation results (see CreateModerationResponseResultsInner).
     * @type {Array<CreateModerationResponseResultsInner>}
     * @memberof CreateModerationResponse
     */
    'results': Array<CreateModerationResponseResultsInner>;
}
1195
/**
 * A single moderation result: an overall flag plus per-category verdicts and scores.
 * @export
 * @interface CreateModerationResponseResultsInner
 */
export interface CreateModerationResponseResultsInner {
    /**
     * Overall verdict: whether the content was flagged.
     * @type {boolean}
     * @memberof CreateModerationResponseResultsInner
     */
    'flagged': boolean;
    /**
     * Per-category boolean verdicts.
     * @type {CreateModerationResponseResultsInnerCategories}
     * @memberof CreateModerationResponseResultsInner
     */
    'categories': CreateModerationResponseResultsInnerCategories;
    /**
     * Per-category numeric scores.
     * @type {CreateModerationResponseResultsInnerCategoryScores}
     * @memberof CreateModerationResponseResultsInner
     */
    'category_scores': CreateModerationResponseResultsInnerCategoryScores;
}
1220
/**
 * Boolean verdict per moderation category. Several keys contain a slash
 * (e.g. 'hate/threatening') and therefore must be accessed with bracket
 * notation: categories['hate/threatening'].
 * @export
 * @interface CreateModerationResponseResultsInnerCategories
 */
export interface CreateModerationResponseResultsInnerCategories {
    /**
     *
     * @type {boolean}
     * @memberof CreateModerationResponseResultsInnerCategories
     */
    'hate': boolean;
    /**
     *
     * @type {boolean}
     * @memberof CreateModerationResponseResultsInnerCategories
     */
    'hate/threatening': boolean;
    /**
     *
     * @type {boolean}
     * @memberof CreateModerationResponseResultsInnerCategories
     */
    'self-harm': boolean;
    /**
     *
     * @type {boolean}
     * @memberof CreateModerationResponseResultsInnerCategories
     */
    'sexual': boolean;
    /**
     *
     * @type {boolean}
     * @memberof CreateModerationResponseResultsInnerCategories
     */
    'sexual/minors': boolean;
    /**
     *
     * @type {boolean}
     * @memberof CreateModerationResponseResultsInnerCategories
     */
    'violence': boolean;
    /**
     *
     * @type {boolean}
     * @memberof CreateModerationResponseResultsInnerCategories
     */
    'violence/graphic': boolean;
}
1269
/**
 * Numeric score per moderation category. Keys mirror
 * CreateModerationResponseResultsInnerCategories, including the
 * slash-containing keys that require bracket-notation access.
 * @export
 * @interface CreateModerationResponseResultsInnerCategoryScores
 */
export interface CreateModerationResponseResultsInnerCategoryScores {
    /**
     *
     * @type {number}
     * @memberof CreateModerationResponseResultsInnerCategoryScores
     */
    'hate': number;
    /**
     *
     * @type {number}
     * @memberof CreateModerationResponseResultsInnerCategoryScores
     */
    'hate/threatening': number;
    /**
     *
     * @type {number}
     * @memberof CreateModerationResponseResultsInnerCategoryScores
     */
    'self-harm': number;
    /**
     *
     * @type {number}
     * @memberof CreateModerationResponseResultsInnerCategoryScores
     */
    'sexual': number;
    /**
     *
     * @type {number}
     * @memberof CreateModerationResponseResultsInnerCategoryScores
     */
    'sexual/minors': number;
    /**
     *
     * @type {number}
     * @memberof CreateModerationResponseResultsInnerCategoryScores
     */
    'violence': number;
    /**
     *
     * @type {number}
     * @memberof CreateModerationResponseResultsInnerCategoryScores
     */
    'violence/graphic': number;
}
1318
/**
 * Request body for document search. Callers supply either inline `documents`
 * or an uploaded `file` ID, but not both.
 * @export
 * @interface CreateSearchRequest
 */
export interface CreateSearchRequest {
    /**
     * Query to search against the documents.
     * @type {string}
     * @memberof CreateSearchRequest
     */
    'query': string;
    /**
     * Up to 200 documents to search over, provided as a list of strings. The maximum document length (in tokens) is 2034 minus the number of tokens in the query. You should specify either `documents` or a `file`, but not both.
     * @type {Array<string>}
     * @memberof CreateSearchRequest
     */
    'documents'?: Array<string> | null;
    /**
     * The ID of an uploaded file that contains documents to search over. You should specify either `documents` or a `file`, but not both.
     * @type {string}
     * @memberof CreateSearchRequest
     */
    'file'?: string | null;
    /**
     * The maximum number of documents to be re-ranked and returned by search. This flag only takes effect when `file` is set.
     * @type {number}
     * @memberof CreateSearchRequest
     */
    'max_rerank'?: number | null;
    /**
     * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
     * @type {boolean}
     * @memberof CreateSearchRequest
     */
    'return_metadata'?: boolean | null;
    /**
     * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
     * @type {string}
     * @memberof CreateSearchRequest
     */
    'user'?: string;
}
1361
/**
1362
*
1363
* @export
1364
* @interface CreateSearchResponse
1365
*/
1366
export interface CreateSearchResponse {
1367
/**
1368
*
1369
* @type {string}
1370
* @memberof CreateSearchResponse
1371
*/
1372
'object'?: string;
1373
/**
1374
*
1375
* @type {string}
1376
* @memberof CreateSearchResponse
1377
*/
1378
'model'?: string;
1379
/**
1380
*
1411
/**
 * Response returned by the audio-transcription endpoint.
 * @export
 * @interface CreateTranscriptionResponse
 */
export interface CreateTranscriptionResponse {
    /**
     * The transcribed text.
     * @type {string}
     * @memberof CreateTranscriptionResponse
     */
    'text': string;
}
1424
/**
 * Response returned by the audio-translation endpoint.
 * @export
 * @interface CreateTranslationResponse
 */
export interface CreateTranslationResponse {
    /**
     * The translated text.
     * @type {string}
     * @memberof CreateTranslationResponse
     */
    'text': string;
}
1437
/**
1438
*
1439
* @export
1440
* @interface DeleteFileResponse
1441
*/
1442
export interface DeleteFileResponse {
1443
/**
1444
*
1445
* @type {string}
1446
* @memberof DeleteFileResponse
1447
*/
1449
/**
1450
*
1451
* @type {string}
1452
* @memberof DeleteFileResponse
1453
*/
1455
/**
1456
*
1457
* @type {boolean}
1458
* @memberof DeleteFileResponse
1459
*/
1462
/**
1463
*
1464
* @export
1465
* @interface DeleteModelResponse
1466
*/
1467
export interface DeleteModelResponse {
1468
/**
1469
*
1470
* @type {string}
1471
* @memberof DeleteModelResponse
1472
*/
1474
/**
1475
*
1476
* @type {string}
1477
* @memberof DeleteModelResponse
1478
*/
1480
/**
1481
*
1482
* @type {boolean}
1483
* @memberof DeleteModelResponse
1484
*/
1487
/**
1488
*
1489
* @export
1490
* @interface Engine
1491
*/
1492
export interface Engine {
1493
/**
1494
*
1495
* @type {string}
1496
* @memberof Engine
1497
*/
1517
}
1518
/**
1519
*
1520
* @export
1521
* @interface FineTune
1522
*/
1523
export interface FineTune {
1524
/**
1525
*
1526
* @type {string}
1527
* @memberof FineTune
1528
*/
1578
/**
1579
*
1580
* @type {Array<OpenAIFile>}
1581
* @memberof FineTune
1582
*/
1584
/**
1585
*
1586
* @type {Array<OpenAIFile>}
1587
* @memberof FineTune
1588
*/
1590
/**
1591
*
1592
* @type {Array<OpenAIFile>}
1593
* @memberof FineTune
1594
*/
1596
/**
1597
*
1598
* @type {Array<FineTuneEvent>}
1599
* @memberof FineTune
1600
*/
1601
'events'?: Array<FineTuneEvent>;
1602
}
1603
/**
1604
*
1605
* @export
1606
* @interface FineTuneEvent
1607
*/
1608
export interface FineTuneEvent {
1609
/**
1610
*
1611
* @type {string}
1612
* @memberof FineTuneEvent
1613
*/
1634
/**
 * Response envelope for the image endpoints.
 * @export
 * @interface ImagesResponse
 */
export interface ImagesResponse {
    /**
     * Creation time of the response. NOTE(review): presumably a Unix
     * timestamp in seconds — confirm against the API reference.
     * @type {number}
     * @memberof ImagesResponse
     */
    'created': number;
    /**
     * The generated images (see ImagesResponseDataInner).
     * @type {Array<ImagesResponseDataInner>}
     * @memberof ImagesResponse
     */
    'data': Array<ImagesResponseDataInner>;
}
1653
/**
 * A single generated image. Exactly which field is populated presumably
 * follows CreateImageRequest['response_format'] (`url` vs `b64_json`) —
 * both are optional here.
 * @export
 * @interface ImagesResponseDataInner
 */
export interface ImagesResponseDataInner {
    /**
     * URL of the generated image.
     * @type {string}
     * @memberof ImagesResponseDataInner
     */
    'url'?: string;
    /**
     * Base64-encoded image payload.
     * @type {string}
     * @memberof ImagesResponseDataInner
     */
    'b64_json'?: string;
}
1672
/**
1673
*
1674
* @export
1675
* @interface ListEnginesResponse
1676
*/
1677
export interface ListEnginesResponse {
1678
/**
1679
*
1680
* @type {string}
1681
* @memberof ListEnginesResponse
1682
*/
1684
/**
1685
*
1686
* @type {Array<Engine>}
1687
* @memberof ListEnginesResponse
1688
*/
1690
}
1691
/**
1692
*
1693
* @export
1694
* @interface ListFilesResponse
1695
*/
1696
export interface ListFilesResponse {
1697
/**
1698
*
1699
* @type {string}
1700
* @memberof ListFilesResponse
1701
*/
1703
/**
1704
*
1705
* @type {Array<OpenAIFile>}
1706
* @memberof ListFilesResponse
1707
*/
1709
}
1710
/**
1711
*
1712
* @export
1713
* @interface ListFineTuneEventsResponse
1714
*/
1715
export interface ListFineTuneEventsResponse {
1716
/**
1717
*
1718
* @type {string}
1719
* @memberof ListFineTuneEventsResponse
1720
*/
1722
/**
1723
*
1724
* @type {Array<FineTuneEvent>}
1725
* @memberof ListFineTuneEventsResponse
1726
*/
1728
}
1729
/**
1730
*
1731
* @export
1732
* @interface ListFineTunesResponse
1733
*/
1734
export interface ListFineTunesResponse {
1735
/**
1736
*
1737
* @type {string}
1738
* @memberof ListFineTunesResponse
1739
*/
1741
/**
1742
*
1743
* @type {Array<FineTune>}
1744
* @memberof ListFineTunesResponse
1745
*/
1748
/**
1749
*
1750
* @export
1751
* @interface ListModelsResponse
1752
*/
1753
export interface ListModelsResponse {
1754
/**
1755
*
1756
* @type {string}
1757
* @memberof ListModelsResponse
1758
*/
1760
/**
1761
*
1762
* @type {Array<Model>}
1763
* @memberof ListModelsResponse
1764
*/
1766
}
1767
/**
1768
*
1769
* @export
1770
* @interface Model
1771
*/
1772
export interface Model {
1773
/**
1774
*
1775
* @type {string}
1776
* @memberof Model
1777
*/
1798
/**
1799
*
1800
* @export
1801
* @interface OpenAIFile
1802
*/
1803
export interface OpenAIFile {
1804
/**
1805
*
1806
* @type {string}
1807
* @memberof OpenAIFile
1808
*/
1840
/**
1841
*
1842
* @type {string}
1843
* @memberof OpenAIFile
1844
*/
1845
'status'?: string;
1846
/**
1847
*
1848
* @type {object}
1849
* @memberof OpenAIFile
1850
*/
1851
'status_details'?: object | null;
1852
}
1853
1854
/**
1855
* OpenAIApi - axios parameter creator
1856
* @export
1857
*/
1858
export const OpenAIApiAxiosParamCreator = function (configuration?: Configuration) {
1859
return {
1860
        /**
         * Builds the request (URL + axios options) to cancel a fine-tune job.
         * @summary Immediately cancel a fine-tune job.
         * @param {string} fineTuneId The ID of the fine-tune job to cancel
         * @param {*} [options] Override http request option.
         * @throws {RequiredError}
         */
        cancelFineTune: async (fineTuneId: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
            // verify required parameter 'fineTuneId' is not null or undefined
            assertParamExists('cancelFineTune', 'fineTuneId', fineTuneId)
            const localVarPath = `/fine-tunes/{fine_tune_id}/cancel`
                .replace(`{${"fine_tune_id"}}`, encodeURIComponent(String(fineTuneId)));
            // use dummy base URL string because the URL constructor only accepts absolute URLs.
            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
            let baseOptions;
            if (configuration) {
                baseOptions = configuration.baseOptions;
            }

            // POST with no body; per-call `options` take precedence over configuration baseOptions.
            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
            const localVarHeaderParameter = {} as any;
            const localVarQueryParameter = {} as any;

            setSearchParams(localVarUrlObj, localVarQueryParameter);
            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
            // Header precedence (last spread wins): generated < baseOptions.headers < options.headers.
            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};

            return {
                url: toPathString(localVarUrlObj),
                options: localVarRequestOptions,
            };
        },
1894
        /**
         * Builds the request for the `/answers` endpoint (JSON POST).
         * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
         * @param {CreateAnswerRequest} createAnswerRequest
         * @param {*} [options] Override http request option.
         * @throws {RequiredError}
         */
        createAnswer: async (createAnswerRequest: CreateAnswerRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
            // verify required parameter 'createAnswerRequest' is not null or undefined
            assertParamExists('createAnswer', 'createAnswerRequest', createAnswerRequest)
            const localVarPath = `/answers`;
            // use dummy base URL string because the URL constructor only accepts absolute URLs.
            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
            let baseOptions;
            if (configuration) {
                baseOptions = configuration.baseOptions;
            }

            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
            const localVarHeaderParameter = {} as any;
            const localVarQueryParameter = {} as any;

            localVarHeaderParameter['Content-Type'] = 'application/json';

            setSearchParams(localVarUrlObj, localVarQueryParameter);
            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
            // Header precedence (last spread wins): generated < baseOptions.headers < options.headers.
            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
            localVarRequestOptions.data = serializeDataIfNeeded(createAnswerRequest, localVarRequestOptions, configuration)

            return {
                url: toPathString(localVarUrlObj),
                options: localVarRequestOptions,
            };
        },
1931
        /**
         * Builds the request for the `/chat/completions` endpoint (JSON POST).
         * @summary Creates a completion for the chat message
         * @param {CreateChatCompletionRequest} createChatCompletionRequest
         * @param {*} [options] Override http request option.
         * @throws {RequiredError}
         */
        createChatCompletion: async (createChatCompletionRequest: CreateChatCompletionRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
            // verify required parameter 'createChatCompletionRequest' is not null or undefined
            assertParamExists('createChatCompletion', 'createChatCompletionRequest', createChatCompletionRequest)
            const localVarPath = `/chat/completions`;
            // use dummy base URL string because the URL constructor only accepts absolute URLs.
            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
            let baseOptions;
            if (configuration) {
                baseOptions = configuration.baseOptions;
            }

            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
            const localVarHeaderParameter = {} as any;
            const localVarQueryParameter = {} as any;

            localVarHeaderParameter['Content-Type'] = 'application/json';

            setSearchParams(localVarUrlObj, localVarQueryParameter);
            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
            // Header precedence (last spread wins): generated < baseOptions.headers < options.headers.
            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
            localVarRequestOptions.data = serializeDataIfNeeded(createChatCompletionRequest, localVarRequestOptions, configuration)

            return {
                url: toPathString(localVarUrlObj),
                options: localVarRequestOptions,
            };
        },
1967
        /**
         * Builds the request for the `/classifications` endpoint (JSON POST).
         * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
         * @param {CreateClassificationRequest} createClassificationRequest
         * @param {*} [options] Override http request option.
         * @throws {RequiredError}
         */
        createClassification: async (createClassificationRequest: CreateClassificationRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
            // verify required parameter 'createClassificationRequest' is not null or undefined
            assertParamExists('createClassification', 'createClassificationRequest', createClassificationRequest)
            const localVarPath = `/classifications`;
            // use dummy base URL string because the URL constructor only accepts absolute URLs.
            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
            let baseOptions;
            if (configuration) {
                baseOptions = configuration.baseOptions;
            }

            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
            const localVarHeaderParameter = {} as any;
            const localVarQueryParameter = {} as any;

            localVarHeaderParameter['Content-Type'] = 'application/json';

            setSearchParams(localVarUrlObj, localVarQueryParameter);
            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
            // Header precedence (last spread wins): generated < baseOptions.headers < options.headers.
            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
            localVarRequestOptions.data = serializeDataIfNeeded(createClassificationRequest, localVarRequestOptions, configuration)

            return {
                url: toPathString(localVarUrlObj),
                options: localVarRequestOptions,
            };
        },
2004
        /**
         * Builds the request for the `/completions` endpoint (JSON POST).
         * @param {CreateCompletionRequest} createCompletionRequest
         * @param {*} [options] Override http request option.
         * @throws {RequiredError}
         */
        createCompletion: async (createCompletionRequest: CreateCompletionRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
            // verify required parameter 'createCompletionRequest' is not null or undefined
            assertParamExists('createCompletion', 'createCompletionRequest', createCompletionRequest)
            const localVarPath = `/completions`;
            // use dummy base URL string because the URL constructor only accepts absolute URLs.
            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
            let baseOptions;
            if (configuration) {
                baseOptions = configuration.baseOptions;
            }

            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
            const localVarHeaderParameter = {} as any;
            const localVarQueryParameter = {} as any;

            localVarHeaderParameter['Content-Type'] = 'application/json';

            setSearchParams(localVarUrlObj, localVarQueryParameter);
            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
            // Header precedence (last spread wins): generated < baseOptions.headers < options.headers.
            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
            localVarRequestOptions.data = serializeDataIfNeeded(createCompletionRequest, localVarRequestOptions, configuration)

            return {
                url: toPathString(localVarUrlObj),
                options: localVarRequestOptions,
            };
        },
2042
* @summary Creates a new edit for the provided input, instruction, and parameters.
2043
* @param {CreateEditRequest} createEditRequest
2044
* @param {*} [options] Override http request option.
2045
* @throws {RequiredError}
2046
*/
2047
createEdit: async (createEditRequest: CreateEditRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2048
// verify required parameter 'createEditRequest' is not null or undefined
2049
assertParamExists('createEdit', 'createEditRequest', createEditRequest)
2051
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2052
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2053
let baseOptions;
2054
if (configuration) {
2055
baseOptions = configuration.baseOptions;
2056
}
2057
2058
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
2059
const localVarHeaderParameter = {} as any;
2060
const localVarQueryParameter = {} as any;
2061
2062
2063
2064
localVarHeaderParameter['Content-Type'] = 'application/json';
2065
2066
setSearchParams(localVarUrlObj, localVarQueryParameter);
2067
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2068
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2069
localVarRequestOptions.data = serializeDataIfNeeded(createEditRequest, localVarRequestOptions, configuration)
2070
2071
return {
2072
url: toPathString(localVarUrlObj),
2073
options: localVarRequestOptions,
2074
};
2075
},
2079
* @param {CreateEmbeddingRequest} createEmbeddingRequest
2080
* @param {*} [options] Override http request option.
2081
* @throws {RequiredError}
2082
*/
2083
createEmbedding: async (createEmbeddingRequest: CreateEmbeddingRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2084
// verify required parameter 'createEmbeddingRequest' is not null or undefined
2085
assertParamExists('createEmbedding', 'createEmbeddingRequest', createEmbeddingRequest)
2087
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2088
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2089
let baseOptions;
2090
if (configuration) {
2091
baseOptions = configuration.baseOptions;
2092
}
2093
2094
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
2095
const localVarHeaderParameter = {} as any;
2096
const localVarQueryParameter = {} as any;
2097
2098
2099
2100
localVarHeaderParameter['Content-Type'] = 'application/json';
2101
2102
setSearchParams(localVarUrlObj, localVarQueryParameter);
2103
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2104
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2105
localVarRequestOptions.data = serializeDataIfNeeded(createEmbeddingRequest, localVarRequestOptions, configuration)
2106
2107
return {
2108
url: toPathString(localVarUrlObj),
2109
options: localVarRequestOptions,
2110
};
2111
},
2112
        /**
         * Builds the request for the `/files` upload endpoint (multipart POST).
         * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
         * @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the `purpose` is set to \\\"fine-tune\\\", each line is a JSON record with \\\"prompt\\\" and \\\"completion\\\" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
         * @param {string} purpose The intended purpose of the uploaded documents. Use \\\"fine-tune\\\" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
         * @param {*} [options] Override http request option.
         * @throws {RequiredError}
         */
        createFile: async (file: File, purpose: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
            // verify required parameter 'file' is not null or undefined
            assertParamExists('createFile', 'file', file)
            // verify required parameter 'purpose' is not null or undefined
            assertParamExists('createFile', 'purpose', purpose)
            const localVarPath = `/files`;
            // use dummy base URL string because the URL constructor only accepts absolute URLs.
            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
            let baseOptions;
            if (configuration) {
                baseOptions = configuration.baseOptions;
            }

            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
            const localVarHeaderParameter = {} as any;
            const localVarQueryParameter = {} as any;
            // Multipart body; callers may supply a custom FormData implementation via configuration.formDataCtor.
            const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();

            if (file !== undefined) {
                localVarFormParams.append('file', file as any);
            }

            if (purpose !== undefined) {
                localVarFormParams.append('purpose', purpose as any);
            }

            localVarHeaderParameter['Content-Type'] = 'multipart/form-data';

            setSearchParams(localVarUrlObj, localVarQueryParameter);
            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
            // NOTE(review): getHeaders() exists on the Node 'form-data' package, not the browser FormData — presumably formDataCtor supplies it; confirm runtime target.
            localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
            localVarRequestOptions.data = localVarFormParams;

            return {
                url: toPathString(localVarUrlObj),
                options: localVarRequestOptions,
            };
        },
2160
/**
2161
*
2162
* @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
2163
* @param {CreateFineTuneRequest} createFineTuneRequest
2164
* @param {*} [options] Override http request option.
2165
* @throws {RequiredError}
2166
*/
2167
createFineTune: async (createFineTuneRequest: CreateFineTuneRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2168
// verify required parameter 'createFineTuneRequest' is not null or undefined
2169
assertParamExists('createFineTune', 'createFineTuneRequest', createFineTuneRequest)
2170
const localVarPath = `/fine-tunes`;
2171
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2172
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2173
let baseOptions;
2174
if (configuration) {
2175
baseOptions = configuration.baseOptions;
2176
}
2177
2178
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
2179
const localVarHeaderParameter = {} as any;
2180
const localVarQueryParameter = {} as any;
2181
2182
2183
2184
localVarHeaderParameter['Content-Type'] = 'application/json';
2185
2186
setSearchParams(localVarUrlObj, localVarQueryParameter);
2187
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2188
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2189
localVarRequestOptions.data = serializeDataIfNeeded(createFineTuneRequest, localVarRequestOptions, configuration)
2190
2191
return {
2192
url: toPathString(localVarUrlObj),
2193
options: localVarRequestOptions,
2194
};
2195
},
2196
/**
2197
*
2198
* @summary Creates an image given a prompt.
2199
* @param {CreateImageRequest} createImageRequest
2200
* @param {*} [options] Override http request option.
2201
* @throws {RequiredError}
2202
*/
2203
createImage: async (createImageRequest: CreateImageRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2204
// verify required parameter 'createImageRequest' is not null or undefined
2205
assertParamExists('createImage', 'createImageRequest', createImageRequest)
2206
const localVarPath = `/images/generations`;
2207
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2208
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2209
let baseOptions;
2210
if (configuration) {
2211
baseOptions = configuration.baseOptions;
2212
}
2213
2214
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
2215
const localVarHeaderParameter = {} as any;
2216
const localVarQueryParameter = {} as any;
2217
2218
2219
2220
localVarHeaderParameter['Content-Type'] = 'application/json';
2221
2222
setSearchParams(localVarUrlObj, localVarQueryParameter);
2223
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2224
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2225
localVarRequestOptions.data = serializeDataIfNeeded(createImageRequest, localVarRequestOptions, configuration)
2226
2227
return {
2228
url: toPathString(localVarUrlObj),
2229
options: localVarRequestOptions,
2230
};
2231
},
2232
/**
2233
*
2234
* @summary Creates an edited or extended image given an original image and a prompt.
2235
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
2236
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
2237
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
2238
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
2239
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
2240
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
2241
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
2242
* @param {*} [options] Override http request option.
2243
* @throws {RequiredError}
2244
*/
2245
createImageEdit: async (image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2246
// verify required parameter 'image' is not null or undefined
2247
assertParamExists('createImageEdit', 'image', image)
2248
// verify required parameter 'prompt' is not null or undefined
2249
assertParamExists('createImageEdit', 'prompt', prompt)
2250
const localVarPath = `/images/edits`;
2251
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2252
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2253
let baseOptions;
2254
if (configuration) {
2255
baseOptions = configuration.baseOptions;
2256
}
2257
2258
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
2259
const localVarHeaderParameter = {} as any;
2260
const localVarQueryParameter = {} as any;
2261
const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
2262
2263
2264
if (image !== undefined) {
2265
localVarFormParams.append('image', image as any);
2266
}
2267
2268
if (mask !== undefined) {
2269
localVarFormParams.append('mask', mask as any);
2270
}
2271
2272
if (prompt !== undefined) {
2273
localVarFormParams.append('prompt', prompt as any);
2274
}
2275
2276
if (n !== undefined) {
2277
localVarFormParams.append('n', n as any);
2278
}
2279
2280
if (size !== undefined) {
2281
localVarFormParams.append('size', size as any);
2282
}
2283
2284
if (responseFormat !== undefined) {
2285
localVarFormParams.append('response_format', responseFormat as any);
2286
}
2287
2288
if (user !== undefined) {
2289
localVarFormParams.append('user', user as any);
2290
}
2291
2292
2293
localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
2294
2295
setSearchParams(localVarUrlObj, localVarQueryParameter);
2296
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2297
localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
2298
localVarRequestOptions.data = localVarFormParams;
2299
2300
return {
2301
url: toPathString(localVarUrlObj),
2302
options: localVarRequestOptions,
2303
};
2304
},
2305
/**
2306
*
2307
* @summary Creates a variation of a given image.
2308
* @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
2309
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
2310
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
2311
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
2312
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
2313
* @param {*} [options] Override http request option.
2314
* @throws {RequiredError}
2315
*/
2316
createImageVariation: async (image: File, n?: number, size?: string, responseFormat?: string, user?: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2317
// verify required parameter 'image' is not null or undefined
2318
assertParamExists('createImageVariation', 'image', image)
2319
const localVarPath = `/images/variations`;
2320
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2321
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2322
let baseOptions;
2323
if (configuration) {
2324
baseOptions = configuration.baseOptions;
2325
}
2326
2327
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
2328
const localVarHeaderParameter = {} as any;
2329
const localVarQueryParameter = {} as any;
2330
const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
2331
2332
2333
if (image !== undefined) {
2334
localVarFormParams.append('image', image as any);
2335
}
2336
2337
if (n !== undefined) {
2338
localVarFormParams.append('n', n as any);
2339
}
2340
2341
if (size !== undefined) {
2342
localVarFormParams.append('size', size as any);
2343
}
2344
2345
if (responseFormat !== undefined) {
2346
localVarFormParams.append('response_format', responseFormat as any);
2347
}
2348
2349
if (user !== undefined) {
2350
localVarFormParams.append('user', user as any);
2351
}
2352
2353
2354
localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
2355
2356
setSearchParams(localVarUrlObj, localVarQueryParameter);
2357
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2358
localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
2359
localVarRequestOptions.data = localVarFormParams;
2360
2361
return {
2362
url: toPathString(localVarUrlObj),
2363
options: localVarRequestOptions,
2364
};
2365
},
2366
/**
2367
*
2368
* @summary Classifies if text violates OpenAI\'s Content Policy
2369
* @param {CreateModerationRequest} createModerationRequest
2370
* @param {*} [options] Override http request option.
2371
* @throws {RequiredError}
2372
*/
2373
createModeration: async (createModerationRequest: CreateModerationRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2374
// verify required parameter 'createModerationRequest' is not null or undefined
2375
assertParamExists('createModeration', 'createModerationRequest', createModerationRequest)
2376
const localVarPath = `/moderations`;
2377
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2378
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2379
let baseOptions;
2380
if (configuration) {
2381
baseOptions = configuration.baseOptions;
2382
}
2383
2384
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
2385
const localVarHeaderParameter = {} as any;
2386
const localVarQueryParameter = {} as any;
2387
2388
2389
2390
localVarHeaderParameter['Content-Type'] = 'application/json';
2391
2392
setSearchParams(localVarUrlObj, localVarQueryParameter);
2393
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2394
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2395
localVarRequestOptions.data = serializeDataIfNeeded(createModerationRequest, localVarRequestOptions, configuration)
2396
2397
return {
2398
url: toPathString(localVarUrlObj),
2399
options: localVarRequestOptions,
2400
};
2401
},
2402
/**
2403
*
2404
* @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
2405
* @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
2406
* @param {CreateSearchRequest} createSearchRequest
2407
* @param {*} [options] Override http request option.
2409
* @throws {RequiredError}
2410
*/
2411
createSearch: async (engineId: string, createSearchRequest: CreateSearchRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2412
// verify required parameter 'engineId' is not null or undefined
2413
assertParamExists('createSearch', 'engineId', engineId)
2414
// verify required parameter 'createSearchRequest' is not null or undefined
2415
assertParamExists('createSearch', 'createSearchRequest', createSearchRequest)
2416
const localVarPath = `/engines/{engine_id}/search`
2417
.replace(`{${"engine_id"}}`, encodeURIComponent(String(engineId)));
2418
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2419
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2420
let baseOptions;
2421
if (configuration) {
2422
baseOptions = configuration.baseOptions;
2423
}
2424
2425
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
2426
const localVarHeaderParameter = {} as any;
2427
const localVarQueryParameter = {} as any;
2428
2429
2430
2431
localVarHeaderParameter['Content-Type'] = 'application/json';
2432
2433
setSearchParams(localVarUrlObj, localVarQueryParameter);
2434
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2435
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2436
localVarRequestOptions.data = serializeDataIfNeeded(createSearchRequest, localVarRequestOptions, configuration)
2437
2438
return {
2439
url: toPathString(localVarUrlObj),
2440
options: localVarRequestOptions,
2441
};
2442
},
2443
/**
2444
*
2445
* @summary Transcribes audio into the input language.
2446
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
2447
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
2448
* @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
2449
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
2450
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
2451
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
2452
* @param {*} [options] Override http request option.
2453
* @throws {RequiredError}
2454
*/
2455
createTranscription: async (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2456
// verify required parameter 'file' is not null or undefined
2457
assertParamExists('createTranscription', 'file', file)
2458
// verify required parameter 'model' is not null or undefined
2459
assertParamExists('createTranscription', 'model', model)
2460
const localVarPath = `/audio/transcriptions`;
2461
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2462
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2463
let baseOptions;
2464
if (configuration) {
2465
baseOptions = configuration.baseOptions;
2466
}
2467
2468
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
2469
const localVarHeaderParameter = {} as any;
2470
const localVarQueryParameter = {} as any;
2471
const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
2472
2473
2474
if (file !== undefined) {
2475
localVarFormParams.append('file', file as any);
2476
}
2477
2478
if (model !== undefined) {
2479
localVarFormParams.append('model', model as any);
2480
}
2481
2482
if (prompt !== undefined) {
2483
localVarFormParams.append('prompt', prompt as any);
2484
}
2485
2486
if (responseFormat !== undefined) {
2487
localVarFormParams.append('response_format', responseFormat as any);
2488
}
2489
2490
if (temperature !== undefined) {
2491
localVarFormParams.append('temperature', temperature as any);
2492
}
2493
2494
if (language !== undefined) {
2495
localVarFormParams.append('language', language as any);
2496
}
2497
2498
2499
localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
2500
2501
setSearchParams(localVarUrlObj, localVarQueryParameter);
2502
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2503
localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
2504
localVarRequestOptions.data = localVarFormParams;
2505
2506
return {
2507
url: toPathString(localVarUrlObj),
2508
options: localVarRequestOptions,
2509
};
2510
},
2511
/**
2512
*
2513
* @summary Translates audio into into English.
2514
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
2515
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
2516
* @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
2517
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
2518
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
2519
* @param {*} [options] Override http request option.
2520
* @throws {RequiredError}
2521
*/
2522
createTranslation: async (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2523
// verify required parameter 'file' is not null or undefined
2524
assertParamExists('createTranslation', 'file', file)
2525
// verify required parameter 'model' is not null or undefined
2526
assertParamExists('createTranslation', 'model', model)
2527
const localVarPath = `/audio/translations`;
2528
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2529
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2530
let baseOptions;
2531
if (configuration) {
2532
baseOptions = configuration.baseOptions;
2533
}
2534
2535
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
2536
const localVarHeaderParameter = {} as any;
2537
const localVarQueryParameter = {} as any;
2538
const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
2539
2540
2541
if (file !== undefined) {
2542
localVarFormParams.append('file', file as any);
2543
}
2544
2545
if (model !== undefined) {
2546
localVarFormParams.append('model', model as any);
2547
}
2548
2549
if (prompt !== undefined) {
2550
localVarFormParams.append('prompt', prompt as any);
2551
}
2552
2553
if (responseFormat !== undefined) {
2554
localVarFormParams.append('response_format', responseFormat as any);
2555
}
2556
2557
if (temperature !== undefined) {
2558
localVarFormParams.append('temperature', temperature as any);
2559
}
2560
2561
2562
localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
2563
2564
setSearchParams(localVarUrlObj, localVarQueryParameter);
2565
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2566
localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
2567
localVarRequestOptions.data = localVarFormParams;
2568
2569
return {
2570
url: toPathString(localVarUrlObj),
2571
options: localVarRequestOptions,
2572
};
2573
},
2574
/**
2575
*
2576
* @summary Delete a file.
2577
* @param {string} fileId The ID of the file to use for this request
2578
* @param {*} [options] Override http request option.
2579
* @throws {RequiredError}
2580
*/
2581
deleteFile: async (fileId: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2582
// verify required parameter 'fileId' is not null or undefined
2583
assertParamExists('deleteFile', 'fileId', fileId)
2584
const localVarPath = `/files/{file_id}`
2585
.replace(`{${"file_id"}}`, encodeURIComponent(String(fileId)));
2586
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2587
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2588
let baseOptions;
2589
if (configuration) {
2590
baseOptions = configuration.baseOptions;
2591
}
2592
2593
const localVarRequestOptions = { method: 'DELETE', ...baseOptions, ...options};
2594
const localVarHeaderParameter = {} as any;
2595
const localVarQueryParameter = {} as any;
2596
2597
2598
2599
setSearchParams(localVarUrlObj, localVarQueryParameter);
2600
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2601
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2602
2603
return {
2604
url: toPathString(localVarUrlObj),
2605
options: localVarRequestOptions,
2606
};
2607
},
2608
/**
2609
*
2610
* @summary Delete a fine-tuned model. You must have the Owner role in your organization.
2611
* @param {string} model The model to delete
2612
* @param {*} [options] Override http request option.
2613
* @throws {RequiredError}
2614
*/
2615
deleteModel: async (model: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2616
// verify required parameter 'model' is not null or undefined
2617
assertParamExists('deleteModel', 'model', model)
2618
const localVarPath = `/models/{model}`
2619
.replace(`{${"model"}}`, encodeURIComponent(String(model)));
2620
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2621
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2622
let baseOptions;
2623
if (configuration) {
2624
baseOptions = configuration.baseOptions;
2625
}
2626
2627
const localVarRequestOptions = { method: 'DELETE', ...baseOptions, ...options};
2628
const localVarHeaderParameter = {} as any;
2629
const localVarQueryParameter = {} as any;
2630
2631
2632
2633
setSearchParams(localVarUrlObj, localVarQueryParameter);
2634
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2635
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2636
2637
return {
2638
url: toPathString(localVarUrlObj),
2639
options: localVarRequestOptions,
2640
};
2641
},
2642
/**
2643
*
2644
* @summary Returns the contents of the specified file
2645
* @param {string} fileId The ID of the file to use for this request
2646
* @param {*} [options] Override http request option.
2647
* @throws {RequiredError}
2648
*/
2649
downloadFile: async (fileId: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2650
// verify required parameter 'fileId' is not null or undefined
2651
assertParamExists('downloadFile', 'fileId', fileId)
2652
const localVarPath = `/files/{file_id}/content`
2653
.replace(`{${"file_id"}}`, encodeURIComponent(String(fileId)));
2654
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2655
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2656
let baseOptions;
2657
if (configuration) {
2658
baseOptions = configuration.baseOptions;
2659
}
2660
2661
const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
2662
const localVarHeaderParameter = {} as any;
2663
const localVarQueryParameter = {} as any;
2664
2665
2666
2667
setSearchParams(localVarUrlObj, localVarQueryParameter);
2668
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2669
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2670
2671
return {
2672
url: toPathString(localVarUrlObj),
2673
options: localVarRequestOptions,
2674
};
2675
},
2676
/**
2677
*
2678
* @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
2681
* @throws {RequiredError}
2682
*/
2683
listEngines: async (options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2684
const localVarPath = `/engines`;
2685
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2686
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2687
let baseOptions;
2688
if (configuration) {
2689
baseOptions = configuration.baseOptions;
2690
}
2691
2692
const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
2693
const localVarHeaderParameter = {} as any;
2694
const localVarQueryParameter = {} as any;
2695
2696
2697
2698
setSearchParams(localVarUrlObj, localVarQueryParameter);
2699
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2700
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2701
2702
return {
2703
url: toPathString(localVarUrlObj),
2704
options: localVarRequestOptions,
2705
};
2706
},
2707
/**
2708
*
2709
* @summary Returns a list of files that belong to the user\'s organization.
2710
* @param {*} [options] Override http request option.
2711
* @throws {RequiredError}
2712
*/
2713
listFiles: async (options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2714
const localVarPath = `/files`;
2715
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2716
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2717
let baseOptions;
2718
if (configuration) {
2719
baseOptions = configuration.baseOptions;
2720
}
2721
2722
const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
2723
const localVarHeaderParameter = {} as any;
2724
const localVarQueryParameter = {} as any;
2725
2726
2727
2728
setSearchParams(localVarUrlObj, localVarQueryParameter);
2729
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2730
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2731
2732
return {
2733
url: toPathString(localVarUrlObj),
2734
options: localVarRequestOptions,
2735
};
2736
},
2737
/**
2738
*
2739
* @summary Get fine-grained status updates for a fine-tune job.
2740
* @param {string} fineTuneId The ID of the fine-tune job to get events for.
2741
* @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a `data: [DONE]` message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
2742
* @param {*} [options] Override http request option.
2743
* @throws {RequiredError}
2744
*/
2745
listFineTuneEvents: async (fineTuneId: string, stream?: boolean, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
2746
// verify required parameter 'fineTuneId' is not null or undefined
2747
assertParamExists('listFineTuneEvents', 'fineTuneId', fineTuneId)
2748
const localVarPath = `/fine-tunes/{fine_tune_id}/events`
2749
.replace(`{${"fine_tune_id"}}`, encodeURIComponent(String(fineTuneId)));
2750
// use dummy base URL string because the URL constructor only accepts absolute URLs.
2751
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
2752
let baseOptions;
2753
if (configuration) {
2754
baseOptions = configuration.baseOptions;
2755
}
2756
2757
const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
2758
const localVarHeaderParameter = {} as any;
2759
const localVarQueryParameter = {} as any;
2760
2761
if (stream !== undefined) {
2762
localVarQueryParameter['stream'] = stream;
2763
}
2764
2765
2766
2767
setSearchParams(localVarUrlObj, localVarQueryParameter);
2768
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
2769
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
2770
2771
return {
2772
url: toPathString(localVarUrlObj),
2773
options: localVarRequestOptions,
2774
};
2775
},
2776
/**
 * 
 * @summary List your organization\'s fine-tuning jobs
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 */
listFineTunes: async (options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
    const localVarPath = `/fine-tunes`;
    // use dummy base URL string because the URL constructor only accepts absolute URLs.
    const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
    let baseOptions;
    if (configuration) {
        baseOptions = configuration.baseOptions;
    }

    const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
    const localVarHeaderParameter = {} as any;
    const localVarQueryParameter = {} as any;


    
    setSearchParams(localVarUrlObj, localVarQueryParameter);
    let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
    localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};

    return {
        url: toPathString(localVarUrlObj),
        options: localVarRequestOptions,
    };
},
2806
/**
 * 
 * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 */
listModels: async (options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
    const localVarPath = `/models`;
    // use dummy base URL string because the URL constructor only accepts absolute URLs.
    const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
    let baseOptions;
    if (configuration) {
        baseOptions = configuration.baseOptions;
    }

    const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
    const localVarHeaderParameter = {} as any;
    const localVarQueryParameter = {} as any;


    
    setSearchParams(localVarUrlObj, localVarQueryParameter);
    let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
    localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};

    return {
        url: toPathString(localVarUrlObj),
        options: localVarRequestOptions,
    };
},
2836
/**
 * 
 * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
 * @param {string} engineId The ID of the engine to use for this request
 * @param {*} [options] Override http request option.
 * @deprecated
 * @throws {RequiredError}
 */
retrieveEngine: async (engineId: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
    // verify required parameter 'engineId' is not null or undefined
    assertParamExists('retrieveEngine', 'engineId', engineId)
    const localVarPath = `/engines/{engine_id}`
        .replace(`{${"engine_id"}}`, encodeURIComponent(String(engineId)));
    // use dummy base URL string because the URL constructor only accepts absolute URLs.
    const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
    let baseOptions;
    if (configuration) {
        baseOptions = configuration.baseOptions;
    }

    const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
    const localVarHeaderParameter = {} as any;
    const localVarQueryParameter = {} as any;


    
    setSearchParams(localVarUrlObj, localVarQueryParameter);
    let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
    localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};

    return {
        url: toPathString(localVarUrlObj),
        options: localVarRequestOptions,
    };
},
2871
/**
 * 
 * @summary Returns information about a specific file.
 * @param {string} fileId The ID of the file to use for this request
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 */
retrieveFile: async (fileId: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
    // verify required parameter 'fileId' is not null or undefined
    assertParamExists('retrieveFile', 'fileId', fileId)
    const localVarPath = `/files/{file_id}`
        .replace(`{${"file_id"}}`, encodeURIComponent(String(fileId)));
    // use dummy base URL string because the URL constructor only accepts absolute URLs.
    const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
    let baseOptions;
    if (configuration) {
        baseOptions = configuration.baseOptions;
    }

    const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
    const localVarHeaderParameter = {} as any;
    const localVarQueryParameter = {} as any;


    
    setSearchParams(localVarUrlObj, localVarQueryParameter);
    let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
    localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};

    return {
        url: toPathString(localVarUrlObj),
        options: localVarRequestOptions,
    };
},
2905
/**
 * 
 * @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
 * @param {string} fineTuneId The ID of the fine-tune job
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 */
retrieveFineTune: async (fineTuneId: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
    // verify required parameter 'fineTuneId' is not null or undefined
    assertParamExists('retrieveFineTune', 'fineTuneId', fineTuneId)
    const localVarPath = `/fine-tunes/{fine_tune_id}`
        .replace(`{${"fine_tune_id"}}`, encodeURIComponent(String(fineTuneId)));
    // use dummy base URL string because the URL constructor only accepts absolute URLs.
    const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
    let baseOptions;
    if (configuration) {
        baseOptions = configuration.baseOptions;
    }

    const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
    const localVarHeaderParameter = {} as any;
    const localVarQueryParameter = {} as any;


    
    setSearchParams(localVarUrlObj, localVarQueryParameter);
    let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
    localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};

    return {
        url: toPathString(localVarUrlObj),
        options: localVarRequestOptions,
    };
},
2939
/**
 * 
 * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
 * @param {string} model The ID of the model to use for this request
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 */
retrieveModel: async (model: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
    // verify required parameter 'model' is not null or undefined
    assertParamExists('retrieveModel', 'model', model)
    const localVarPath = `/models/{model}`
        .replace(`{${"model"}}`, encodeURIComponent(String(model)));
    // use dummy base URL string because the URL constructor only accepts absolute URLs.
    const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
    let baseOptions;
    if (configuration) {
        baseOptions = configuration.baseOptions;
    }

    const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
    const localVarHeaderParameter = {} as any;
    const localVarQueryParameter = {} as any;


    
    setSearchParams(localVarUrlObj, localVarQueryParameter);
    let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
    localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};

    return {
        url: toPathString(localVarUrlObj),
        options: localVarRequestOptions,
    };
},
2973
}
2974
};
2975
2976
/**
2977
* OpenAIApi - functional programming interface
2978
* @export
2979
*/
2980
export const OpenAIApiFp = function(configuration?: Configuration) {
2981
const localVarAxiosParamCreator = OpenAIApiAxiosParamCreator(configuration)
2982
return {
2983
/**
2984
*
2985
* @summary Immediately cancel a fine-tune job.
2986
* @param {string} fineTuneId The ID of the fine-tune job to cancel
2987
* @param {*} [options] Override http request option.
2988
* @throws {RequiredError}
2989
*/
2990
async cancelFineTune(fineTuneId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<FineTune>> {
2991
const localVarAxiosArgs = await localVarAxiosParamCreator.cancelFineTune(fineTuneId, options);
2992
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
2993
},
2994
/**
2995
*
2996
* @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
2997
* @param {CreateAnswerRequest} createAnswerRequest
2998
* @param {*} [options] Override http request option.
3000
* @throws {RequiredError}
3001
*/
3002
async createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateAnswerResponse>> {
3003
const localVarAxiosArgs = await localVarAxiosParamCreator.createAnswer(createAnswerRequest, options);
3004
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3005
},
3006
/**
3007
*
3008
* @summary Creates a completion for the chat message
3009
* @param {CreateChatCompletionRequest} createChatCompletionRequest
3010
* @param {*} [options] Override http request option.
3011
* @throws {RequiredError}
3012
*/
3013
async createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateChatCompletionResponse>> {
3014
const localVarAxiosArgs = await localVarAxiosParamCreator.createChatCompletion(createChatCompletionRequest, options);
3015
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3016
},
3017
/**
3018
*
3019
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
3020
* @param {CreateClassificationRequest} createClassificationRequest
3021
* @param {*} [options] Override http request option.
3023
* @throws {RequiredError}
3024
*/
3025
async createClassification(createClassificationRequest: CreateClassificationRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateClassificationResponse>> {
3026
const localVarAxiosArgs = await localVarAxiosParamCreator.createClassification(createClassificationRequest, options);
3027
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3028
},
3029
/**
3030
*
3032
* @param {CreateCompletionRequest} createCompletionRequest
3033
* @param {*} [options] Override http request option.
3034
* @throws {RequiredError}
3035
*/
3036
async createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>> {
3037
const localVarAxiosArgs = await localVarAxiosParamCreator.createCompletion(createCompletionRequest, options);
3038
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3039
},
3042
* @summary Creates a new edit for the provided input, instruction, and parameters.
3043
* @param {CreateEditRequest} createEditRequest
3044
* @param {*} [options] Override http request option.
3045
* @throws {RequiredError}
3046
*/
3047
async createEdit(createEditRequest: CreateEditRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateEditResponse>> {
3048
const localVarAxiosArgs = await localVarAxiosParamCreator.createEdit(createEditRequest, options);
3049
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3050
},
3054
* @param {CreateEmbeddingRequest} createEmbeddingRequest
3055
* @param {*} [options] Override http request option.
3056
* @throws {RequiredError}
3057
*/
3058
async createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateEmbeddingResponse>> {
3059
const localVarAxiosArgs = await localVarAxiosParamCreator.createEmbedding(createEmbeddingRequest, options);
3060
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3061
},
3062
/**
3063
*
3064
* @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
3065
* @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the `purpose` is set to \\\"fine-tune\\\", each line is a JSON record with \\\"prompt\\\" and \\\"completion\\\" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
3066
* @param {string} purpose The intended purpose of the uploaded documents. Use \\\"fine-tune\\\" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
3067
* @param {*} [options] Override http request option.
3068
* @throws {RequiredError}
3069
*/
3070
async createFile(file: File, purpose: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<OpenAIFile>> {
3071
const localVarAxiosArgs = await localVarAxiosParamCreator.createFile(file, purpose, options);
3072
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3073
},
3074
/**
3075
*
3076
* @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
3077
* @param {CreateFineTuneRequest} createFineTuneRequest
3078
* @param {*} [options] Override http request option.
3079
* @throws {RequiredError}
3080
*/
3081
async createFineTune(createFineTuneRequest: CreateFineTuneRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<FineTune>> {
3082
const localVarAxiosArgs = await localVarAxiosParamCreator.createFineTune(createFineTuneRequest, options);
3083
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3084
},
3085
/**
3086
*
3087
* @summary Creates an image given a prompt.
3088
* @param {CreateImageRequest} createImageRequest
3089
* @param {*} [options] Override http request option.
3090
* @throws {RequiredError}
3091
*/
3092
async createImage(createImageRequest: CreateImageRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ImagesResponse>> {
3093
const localVarAxiosArgs = await localVarAxiosParamCreator.createImage(createImageRequest, options);
3094
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3095
},
3096
/**
3097
*
3098
* @summary Creates an edited or extended image given an original image and a prompt.
3099
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
3100
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
3101
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
3102
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
3103
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
3104
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
3105
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
3106
* @param {*} [options] Override http request option.
3107
* @throws {RequiredError}
3108
*/
3109
async createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ImagesResponse>> {
3110
const localVarAxiosArgs = await localVarAxiosParamCreator.createImageEdit(image, prompt, mask, n, size, responseFormat, user, options);
3111
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3112
},
3113
/**
3114
*
3115
* @summary Creates a variation of a given image.
3116
* @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
3117
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
3118
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
3119
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
3120
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
3121
* @param {*} [options] Override http request option.
3122
* @throws {RequiredError}
3123
*/
3124
async createImageVariation(image: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ImagesResponse>> {
3125
const localVarAxiosArgs = await localVarAxiosParamCreator.createImageVariation(image, n, size, responseFormat, user, options);
3126
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3127
},
3128
/**
3129
*
3130
* @summary Classifies if text violates OpenAI\'s Content Policy
3131
* @param {CreateModerationRequest} createModerationRequest
3132
* @param {*} [options] Override http request option.
3133
* @throws {RequiredError}
3134
*/
3135
async createModeration(createModerationRequest: CreateModerationRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateModerationResponse>> {
3136
const localVarAxiosArgs = await localVarAxiosParamCreator.createModeration(createModerationRequest, options);
3137
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3138
},
3139
/**
3140
*
3141
* @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
3142
* @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
3143
* @param {CreateSearchRequest} createSearchRequest
3144
* @param {*} [options] Override http request option.
3146
* @throws {RequiredError}
3147
*/
3148
async createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateSearchResponse>> {
3149
const localVarAxiosArgs = await localVarAxiosParamCreator.createSearch(engineId, createSearchRequest, options);
3150
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3151
},
3152
/**
3153
*
3154
* @summary Transcribes audio into the input language.
3155
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
3156
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
3157
* @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
3158
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
3159
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
3160
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
3161
* @param {*} [options] Override http request option.
3162
* @throws {RequiredError}
3163
*/
3164
async createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranscriptionResponse>> {
3165
const localVarAxiosArgs = await localVarAxiosParamCreator.createTranscription(file, model, prompt, responseFormat, temperature, language, options);
3166
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3167
},
3168
/**
3169
*
3170
* @summary Translates audio into into English.
3171
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
3172
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
3173
* @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
3174
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
3175
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
3176
* @param {*} [options] Override http request option.
3177
* @throws {RequiredError}
3178
*/
3179
async createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateTranslationResponse>> {
3180
const localVarAxiosArgs = await localVarAxiosParamCreator.createTranslation(file, model, prompt, responseFormat, temperature, options);
3181
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3182
},
3183
/**
3184
*
3185
* @summary Delete a file.
3186
* @param {string} fileId The ID of the file to use for this request
3187
* @param {*} [options] Override http request option.
3188
* @throws {RequiredError}
3189
*/
3190
async deleteFile(fileId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<DeleteFileResponse>> {
3191
const localVarAxiosArgs = await localVarAxiosParamCreator.deleteFile(fileId, options);
3192
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3193
},
3194
/**
3195
*
3196
* @summary Delete a fine-tuned model. You must have the Owner role in your organization.
3197
* @param {string} model The model to delete
3198
* @param {*} [options] Override http request option.
3199
* @throws {RequiredError}
3200
*/
3201
async deleteModel(model: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<DeleteModelResponse>> {
3202
const localVarAxiosArgs = await localVarAxiosParamCreator.deleteModel(model, options);
3203
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3204
},
3205
/**
3206
*
3207
* @summary Returns the contents of the specified file
3208
* @param {string} fileId The ID of the file to use for this request
3209
* @param {*} [options] Override http request option.
3210
* @throws {RequiredError}
3211
*/
3212
async downloadFile(fileId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<string>> {
3213
const localVarAxiosArgs = await localVarAxiosParamCreator.downloadFile(fileId, options);
3214
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3215
},
3216
/**
3217
*
3218
* @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
3221
* @throws {RequiredError}
3222
*/
3223
async listEngines(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListEnginesResponse>> {
3224
const localVarAxiosArgs = await localVarAxiosParamCreator.listEngines(options);
3225
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3226
},
3227
/**
3228
*
3229
* @summary Returns a list of files that belong to the user\'s organization.
3230
* @param {*} [options] Override http request option.
3231
* @throws {RequiredError}
3232
*/
3233
async listFiles(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListFilesResponse>> {
3234
const localVarAxiosArgs = await localVarAxiosParamCreator.listFiles(options);
3235
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3236
},
3237
/**
3238
*
3239
* @summary Get fine-grained status updates for a fine-tune job.
3240
* @param {string} fineTuneId The ID of the fine-tune job to get events for.
3241
* @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a `data: [DONE]` message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
3242
* @param {*} [options] Override http request option.
3243
* @throws {RequiredError}
3244
*/
3245
async listFineTuneEvents(fineTuneId: string, stream?: boolean, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListFineTuneEventsResponse>> {
3246
const localVarAxiosArgs = await localVarAxiosParamCreator.listFineTuneEvents(fineTuneId, stream, options);
3247
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3248
},
3249
/**
3250
*
3251
* @summary List your organization\'s fine-tuning jobs
3252
* @param {*} [options] Override http request option.
3253
* @throws {RequiredError}
3254
*/
3255
async listFineTunes(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListFineTunesResponse>> {
3256
const localVarAxiosArgs = await localVarAxiosParamCreator.listFineTunes(options);
3257
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3258
},
3259
/**
3260
*
3261
* @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
3262
* @param {*} [options] Override http request option.
3263
* @throws {RequiredError}
3264
*/
3265
async listModels(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<ListModelsResponse>> {
3266
const localVarAxiosArgs = await localVarAxiosParamCreator.listModels(options);
3267
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3268
},
3269
/**
3270
*
3271
* @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
3272
* @param {string} engineId The ID of the engine to use for this request
3273
* @param {*} [options] Override http request option.
3275
* @throws {RequiredError}
3276
*/
3277
async retrieveEngine(engineId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<Engine>> {
3278
const localVarAxiosArgs = await localVarAxiosParamCreator.retrieveEngine(engineId, options);
3279
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3280
},
3281
/**
3282
*
3283
* @summary Returns information about a specific file.
3284
* @param {string} fileId The ID of the file to use for this request
3285
* @param {*} [options] Override http request option.
3286
* @throws {RequiredError}
3287
*/
3288
async retrieveFile(fileId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<OpenAIFile>> {
3289
const localVarAxiosArgs = await localVarAxiosParamCreator.retrieveFile(fileId, options);
3290
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3291
},
3292
/**
3293
*
3294
* @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
3295
* @param {string} fineTuneId The ID of the fine-tune job
3296
* @param {*} [options] Override http request option.
3297
* @throws {RequiredError}
3298
*/
3299
async retrieveFineTune(fineTuneId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<FineTune>> {
3300
const localVarAxiosArgs = await localVarAxiosParamCreator.retrieveFineTune(fineTuneId, options);
3301
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3302
},
3303
/**
3304
*
3305
* @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
3306
* @param {string} model The ID of the model to use for this request
3307
* @param {*} [options] Override http request option.
3308
* @throws {RequiredError}
3309
*/
3310
async retrieveModel(model: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<Model>> {
3311
const localVarAxiosArgs = await localVarAxiosParamCreator.retrieveModel(model, options);
3312
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
3313
},
3314
}
3315
};
3316
3317
/**
3318
* OpenAIApi - factory interface
3319
* @export
3320
*/
3321
export const OpenAIApiFactory = function (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) {
3322
const localVarFp = OpenAIApiFp(configuration)
3323
return {
3324
/**
3325
*
3326
* @summary Immediately cancel a fine-tune job.
3327
* @param {string} fineTuneId The ID of the fine-tune job to cancel
3328
* @param {*} [options] Override http request option.
3329
* @throws {RequiredError}
3330
*/
3331
cancelFineTune(fineTuneId: string, options?: any): AxiosPromise<FineTune> {
3332
return localVarFp.cancelFineTune(fineTuneId, options).then((request) => request(axios, basePath));
3333
},
3334
/**
3335
*
3336
* @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
3337
* @param {CreateAnswerRequest} createAnswerRequest
3338
* @param {*} [options] Override http request option.
3340
* @throws {RequiredError}
3341
*/
3342
createAnswer(createAnswerRequest: CreateAnswerRequest, options?: any): AxiosPromise<CreateAnswerResponse> {
3343
return localVarFp.createAnswer(createAnswerRequest, options).then((request) => request(axios, basePath));
3344
},
3345
/**
3346
*
3347
* @summary Creates a completion for the chat message
3348
* @param {CreateChatCompletionRequest} createChatCompletionRequest
3349
* @param {*} [options] Override http request option.
3350
* @throws {RequiredError}
3351
*/
3352
createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: any): AxiosPromise<CreateChatCompletionResponse> {
3353
return localVarFp.createChatCompletion(createChatCompletionRequest, options).then((request) => request(axios, basePath));
3354
},
3355
/**
3356
*
3357
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
3358
* @param {CreateClassificationRequest} createClassificationRequest
3359
* @param {*} [options] Override http request option.
3361
* @throws {RequiredError}
3362
*/
3363
createClassification(createClassificationRequest: CreateClassificationRequest, options?: any): AxiosPromise<CreateClassificationResponse> {
3364
return localVarFp.createClassification(createClassificationRequest, options).then((request) => request(axios, basePath));
3365
},
3366
/**
3367
*
3369
* @param {CreateCompletionRequest} createCompletionRequest
3370
* @param {*} [options] Override http request option.
3371
* @throws {RequiredError}
3372
*/
3373
createCompletion(createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise<CreateCompletionResponse> {
3374
return localVarFp.createCompletion(createCompletionRequest, options).then((request) => request(axios, basePath));
3378
* @summary Creates a new edit for the provided input, instruction, and parameters.
3379
* @param {CreateEditRequest} createEditRequest
3380
* @param {*} [options] Override http request option.
3381
* @throws {RequiredError}
3382
*/
3383
createEdit(createEditRequest: CreateEditRequest, options?: any): AxiosPromise<CreateEditResponse> {
3384
return localVarFp.createEdit(createEditRequest, options).then((request) => request(axios, basePath));
3389
* @param {CreateEmbeddingRequest} createEmbeddingRequest
3390
* @param {*} [options] Override http request option.
3391
* @throws {RequiredError}
3392
*/
3393
createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: any): AxiosPromise<CreateEmbeddingResponse> {
3394
return localVarFp.createEmbedding(createEmbeddingRequest, options).then((request) => request(axios, basePath));
3395
},
3396
/**
3397
*
3398
* @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
3399
* @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the `purpose` is set to \\\"fine-tune\\\", each line is a JSON record with \\\"prompt\\\" and \\\"completion\\\" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
3400
* @param {string} purpose The intended purpose of the uploaded documents. Use \\\"fine-tune\\\" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
3401
* @param {*} [options] Override http request option.
3402
* @throws {RequiredError}
3403
*/
3404
createFile(file: File, purpose: string, options?: any): AxiosPromise<OpenAIFile> {
3405
return localVarFp.createFile(file, purpose, options).then((request) => request(axios, basePath));
3406
},
3407
/**
3408
*
3409
* @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
3410
* @param {CreateFineTuneRequest} createFineTuneRequest
3411
* @param {*} [options] Override http request option.
3412
* @throws {RequiredError}
3413
*/
3414
createFineTune(createFineTuneRequest: CreateFineTuneRequest, options?: any): AxiosPromise<FineTune> {
3415
return localVarFp.createFineTune(createFineTuneRequest, options).then((request) => request(axios, basePath));
3416
},
3417
/**
3418
*
3419
* @summary Creates an image given a prompt.
3420
* @param {CreateImageRequest} createImageRequest
3421
* @param {*} [options] Override http request option.
3422
* @throws {RequiredError}
3423
*/
3424
createImage(createImageRequest: CreateImageRequest, options?: any): AxiosPromise<ImagesResponse> {
3425
return localVarFp.createImage(createImageRequest, options).then((request) => request(axios, basePath));
3426
},
3427
/**
3428
*
3429
* @summary Creates an edited or extended image given an original image and a prompt.
3430
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
3431
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
3432
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
3433
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
3434
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
3435
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
3436
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
3437
* @param {*} [options] Override http request option.
3438
* @throws {RequiredError}
3439
*/
3440
createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: any): AxiosPromise<ImagesResponse> {
3441
return localVarFp.createImageEdit(image, prompt, mask, n, size, responseFormat, user, options).then((request) => request(axios, basePath));
3442
},
3443
/**
3444
*
3445
* @summary Creates a variation of a given image.
3446
* @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
3447
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
3448
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
3449
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
3450
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
3451
* @param {*} [options] Override http request option.
3452
* @throws {RequiredError}
3453
*/
3454
createImageVariation(image: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: any): AxiosPromise<ImagesResponse> {
3455
return localVarFp.createImageVariation(image, n, size, responseFormat, user, options).then((request) => request(axios, basePath));
3456
},
3457
/**
3458
*
3459
* @summary Classifies if text violates OpenAI\'s Content Policy
3460
* @param {CreateModerationRequest} createModerationRequest
3461
* @param {*} [options] Override http request option.
3462
* @throws {RequiredError}
3463
*/
3464
createModeration(createModerationRequest: CreateModerationRequest, options?: any): AxiosPromise<CreateModerationResponse> {
3465
return localVarFp.createModeration(createModerationRequest, options).then((request) => request(axios, basePath));
3466
},
3467
/**
3468
*
3469
* @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
3470
* @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
3471
* @param {CreateSearchRequest} createSearchRequest
3472
* @param {*} [options] Override http request option.
3474
* @throws {RequiredError}
3475
*/
3476
createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: any): AxiosPromise<CreateSearchResponse> {
3477
return localVarFp.createSearch(engineId, createSearchRequest, options).then((request) => request(axios, basePath));
3478
},
3479
/**
3480
*
3481
* @summary Transcribes audio into the input language.
3482
* @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
3483
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
3484
* @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
3485
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
3486
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
3487
* @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
3488
* @param {*} [options] Override http request option.
3489
* @throws {RequiredError}
3490
*/
3491
createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: any): AxiosPromise<CreateTranscriptionResponse> {
3492
return localVarFp.createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(axios, basePath));
3493
},
3494
/**
3495
*
3496
* @summary Translates audio into into English.
3497
* @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
3498
* @param {string} model ID of the model to use. Only `whisper-1` is currently available.
3499
* @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
3500
* @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
3501
* @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
3502
* @param {*} [options] Override http request option.
3503
* @throws {RequiredError}
3504
*/
3505
createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: any): AxiosPromise<CreateTranslationResponse> {
3506
return localVarFp.createTranslation(file, model, prompt, responseFormat, temperature, options).then((request) => request(axios, basePath));
3507
},
3508
/**
3509
*
3510
* @summary Delete a file.
3511
* @param {string} fileId The ID of the file to use for this request
3512
* @param {*} [options] Override http request option.
3513
* @throws {RequiredError}
3514
*/
3515
deleteFile(fileId: string, options?: any): AxiosPromise<DeleteFileResponse> {
3516
return localVarFp.deleteFile(fileId, options).then((request) => request(axios, basePath));
3517
},
3518
/**
3519
*
3520
* @summary Delete a fine-tuned model. You must have the Owner role in your organization.
3521
* @param {string} model The model to delete
3522
* @param {*} [options] Override http request option.
3523
* @throws {RequiredError}
3524
*/
3525
deleteModel(model: string, options?: any): AxiosPromise<DeleteModelResponse> {
3526
return localVarFp.deleteModel(model, options).then((request) => request(axios, basePath));
3527
},
3528
/**
3529
*
3530
* @summary Returns the contents of the specified file
3531
* @param {string} fileId The ID of the file to use for this request
3532
* @param {*} [options] Override http request option.
3533
* @throws {RequiredError}
3534
*/
3535
downloadFile(fileId: string, options?: any): AxiosPromise<string> {
3536
return localVarFp.downloadFile(fileId, options).then((request) => request(axios, basePath));
3537
},
3538
/**
3539
*
3540
* @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
3543
* @throws {RequiredError}
3544
*/
3545
listEngines(options?: any): AxiosPromise<ListEnginesResponse> {
3546
return localVarFp.listEngines(options).then((request) => request(axios, basePath));
3547
},
3548
/**
3549
*
3550
* @summary Returns a list of files that belong to the user\'s organization.
3551
* @param {*} [options] Override http request option.
3552
* @throws {RequiredError}
3553
*/
3554
listFiles(options?: any): AxiosPromise<ListFilesResponse> {
3555
return localVarFp.listFiles(options).then((request) => request(axios, basePath));
3556
},
3557
/**
3558
*
3559
* @summary Get fine-grained status updates for a fine-tune job.
3560
* @param {string} fineTuneId The ID of the fine-tune job to get events for.
3561
* @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a `data: [DONE]` message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
3562
* @param {*} [options] Override http request option.
3563
* @throws {RequiredError}
3564
*/
3565
listFineTuneEvents(fineTuneId: string, stream?: boolean, options?: any): AxiosPromise<ListFineTuneEventsResponse> {
3566
return localVarFp.listFineTuneEvents(fineTuneId, stream, options).then((request) => request(axios, basePath));
3567
},
3568
/**
3569
*
3570
* @summary List your organization\'s fine-tuning jobs
3571
* @param {*} [options] Override http request option.
3572
* @throws {RequiredError}
3573
*/
3574
listFineTunes(options?: any): AxiosPromise<ListFineTunesResponse> {
3575
return localVarFp.listFineTunes(options).then((request) => request(axios, basePath));
3576
},
3577
/**
3578
*
3579
* @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
3580
* @param {*} [options] Override http request option.
3581
* @throws {RequiredError}
3582
*/
3583
listModels(options?: any): AxiosPromise<ListModelsResponse> {
3584
return localVarFp.listModels(options).then((request) => request(axios, basePath));
3585
},
3586
/**
3587
*
3588
* @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
3589
* @param {string} engineId The ID of the engine to use for this request
3590
* @param {*} [options] Override http request option.
3592
* @throws {RequiredError}
3593
*/
3594
retrieveEngine(engineId: string, options?: any): AxiosPromise<Engine> {
3595
return localVarFp.retrieveEngine(engineId, options).then((request) => request(axios, basePath));
3596
},
3597
/**
3598
*
3599
* @summary Returns information about a specific file.
3600
* @param {string} fileId The ID of the file to use for this request
3601
* @param {*} [options] Override http request option.
3602
* @throws {RequiredError}
3603
*/
3604
retrieveFile(fileId: string, options?: any): AxiosPromise<OpenAIFile> {
3605
return localVarFp.retrieveFile(fileId, options).then((request) => request(axios, basePath));
3606
},
3607
/**
3608
*
3609
* @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
3610
* @param {string} fineTuneId The ID of the fine-tune job
3611
* @param {*} [options] Override http request option.
3612
* @throws {RequiredError}
3613
*/
3614
retrieveFineTune(fineTuneId: string, options?: any): AxiosPromise<FineTune> {
3615
return localVarFp.retrieveFineTune(fineTuneId, options).then((request) => request(axios, basePath));
3616
},
3617
/**
3618
*
3619
* @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
3620
* @param {string} model The ID of the model to use for this request
3621
* @param {*} [options] Override http request option.
3622
* @throws {RequiredError}
3623
*/
3624
retrieveModel(model: string, options?: any): AxiosPromise<Model> {
3625
return localVarFp.retrieveModel(model, options).then((request) => request(axios, basePath));
3626
},
3627
};
3628
};
3629
3630
/**
3631
* OpenAIApi - object-oriented interface
3632
* @export
3633
* @class OpenAIApi
3634
* @extends {BaseAPI}
3635
*/
3636
export class OpenAIApi extends BaseAPI {
3637
/**
3638
*
3639
* @summary Immediately cancel a fine-tune job.
3640
* @param {string} fineTuneId The ID of the fine-tune job to cancel
3641
* @param {*} [options] Override http request option.
3642
* @throws {RequiredError}
3643
* @memberof OpenAIApi
3644
*/
3645
public cancelFineTune(fineTuneId: string, options?: AxiosRequestConfig) {
3646
return OpenAIApiFp(this.configuration).cancelFineTune(fineTuneId, options).then((request) => request(this.axios, this.basePath));
3647
}
3648
3649
/**
3650
*
3651
* @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
3652
* @param {CreateAnswerRequest} createAnswerRequest
3653
* @param {*} [options] Override http request option.
3655
* @throws {RequiredError}
3656
* @memberof OpenAIApi
3657
*/
3658
public createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig) {
3659
return OpenAIApiFp(this.configuration).createAnswer(createAnswerRequest, options).then((request) => request(this.axios, this.basePath));
3660
}
3661
3662
/**
3663
*
3664
* @summary Creates a completion for the chat message
3665
* @param {CreateChatCompletionRequest} createChatCompletionRequest
3666
* @param {*} [options] Override http request option.
3667
* @throws {RequiredError}
3668
* @memberof OpenAIApi
3669
*/
3670
public createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig) {
3671
return OpenAIApiFp(this.configuration).createChatCompletion(createChatCompletionRequest, options).then((request) => request(this.axios, this.basePath));
3672
}
3673
3674
/**
3675
*
3676
* @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
3677
* @param {CreateClassificationRequest} createClassificationRequest
3678
* @param {*} [options] Override http request option.
3680
* @throws {RequiredError}
3681
* @memberof OpenAIApi
3682
*/
3683
public createClassification(createClassificationRequest: CreateClassificationRequest, options?: AxiosRequestConfig) {
3684
return OpenAIApiFp(this.configuration).createClassification(createClassificationRequest, options).then((request) => request(this.axios, this.basePath));
3685
}
3686
3687
/**
3688
*
3690
* @param {CreateCompletionRequest} createCompletionRequest
3691
* @param {*} [options] Override http request option.
3692
* @throws {RequiredError}
3693
* @memberof OpenAIApi
3694
*/
3695
public createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig) {
3696
return OpenAIApiFp(this.configuration).createCompletion(createCompletionRequest, options).then((request) => request(this.axios, this.basePath));
3701
* @summary Creates a new edit for the provided input, instruction, and parameters.
3702
* @param {CreateEditRequest} createEditRequest
3703
* @param {*} [options] Override http request option.
3704
* @throws {RequiredError}
3705
* @memberof OpenAIApi
3706
*/
3707
public createEdit(createEditRequest: CreateEditRequest, options?: AxiosRequestConfig) {
3708
return OpenAIApiFp(this.configuration).createEdit(createEditRequest, options).then((request) => request(this.axios, this.basePath));
3714
* @param {CreateEmbeddingRequest} createEmbeddingRequest
3715
* @param {*} [options] Override http request option.
3716
* @throws {RequiredError}
3717
* @memberof OpenAIApi
3718
*/
3719
public createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: AxiosRequestConfig) {
3720
return OpenAIApiFp(this.configuration).createEmbedding(createEmbeddingRequest, options).then((request) => request(this.axios, this.basePath));
3721
}
3722
3723
/**
3724
*
3725
* @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
3726
* @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the `purpose` is set to \\\"fine-tune\\\", each line is a JSON record with \\\"prompt\\\" and \\\"completion\\\" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
3727
* @param {string} purpose The intended purpose of the uploaded documents. Use \\\"fine-tune\\\" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
3728
* @param {*} [options] Override http request option.
3729
* @throws {RequiredError}
3730
* @memberof OpenAIApi
3731
*/
3732
public createFile(file: File, purpose: string, options?: AxiosRequestConfig) {
3733
return OpenAIApiFp(this.configuration).createFile(file, purpose, options).then((request) => request(this.axios, this.basePath));
3734
}
3735
3736
/**
3737
*
3738
* @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
3739
* @param {CreateFineTuneRequest} createFineTuneRequest
3740
* @param {*} [options] Override http request option.
3741
* @throws {RequiredError}
3742
* @memberof OpenAIApi
3743
*/
3744
public createFineTune(createFineTuneRequest: CreateFineTuneRequest, options?: AxiosRequestConfig) {
3745
return OpenAIApiFp(this.configuration).createFineTune(createFineTuneRequest, options).then((request) => request(this.axios, this.basePath));
3746
}
3747
3748
/**
3749
*
3750
* @summary Creates an image given a prompt.
3751
* @param {CreateImageRequest} createImageRequest
3752
* @param {*} [options] Override http request option.
3753
* @throws {RequiredError}
3754
* @memberof OpenAIApi
3755
*/
3756
public createImage(createImageRequest: CreateImageRequest, options?: AxiosRequestConfig) {
3757
return OpenAIApiFp(this.configuration).createImage(createImageRequest, options).then((request) => request(this.axios, this.basePath));
3758
}
3759
3760
/**
3761
*
3762
* @summary Creates an edited or extended image given an original image and a prompt.
3763
* @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
3764
* @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
3765
* @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
3766
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
3767
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
3768
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
3769
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
3770
* @param {*} [options] Override http request option.
3771
* @throws {RequiredError}
3772
* @memberof OpenAIApi
3773
*/
3774
public createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig) {
3775
return OpenAIApiFp(this.configuration).createImageEdit(image, prompt, mask, n, size, responseFormat, user, options).then((request) => request(this.axios, this.basePath));
3776
}
3777
3778
/**
3779
*
3780
* @summary Creates a variation of a given image.
3781
* @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
3782
* @param {number} [n] The number of images to generate. Must be between 1 and 10.
3783
* @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
3784
* @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
3785
* @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
3786
* @param {*} [options] Override http request option.
3787
* @throws {RequiredError}
3788
* @memberof OpenAIApi
3789
*/
3790
public createImageVariation(image: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig) {
3791
return OpenAIApiFp(this.configuration).createImageVariation(image, n, size, responseFormat, user, options).then((request) => request(this.axios, this.basePath));
3792
}
3793
3794
/**
3795
*
3796
* @summary Classifies if text violates OpenAI\'s Content Policy
3797
* @param {CreateModerationRequest} createModerationRequest
3798
* @param {*} [options] Override http request option.
3799
* @throws {RequiredError}
3800
* @memberof OpenAIApi
3801
*/
3802
public createModeration(createModerationRequest: CreateModerationRequest, options?: AxiosRequestConfig) {
3803
return OpenAIApiFp(this.configuration).createModeration(createModerationRequest, options).then((request) => request(this.axios, this.basePath));
3804
}
3805
3806
/**
3807
*
3808
* @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
3809
* @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
3810
* @param {CreateSearchRequest} createSearchRequest
3811
* @param {*} [options] Override http request option.
3813
* @throws {RequiredError}
3814
* @memberof OpenAIApi
3815
*/
3816
public createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig) {
3817
return OpenAIApiFp(this.configuration).createSearch(engineId, createSearchRequest, options).then((request) => request(this.axios, this.basePath));
3818
}
3819
3820
/**
 *
 * @summary Transcribes audio into the input language.
 * @param {File} file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
 * @param {string} model ID of the model to use. Only `whisper-1` is currently available.
 * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
 * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
 * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
 * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .createTranscription(file, model, prompt, responseFormat, temperature, language, options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Translates audio into English.
 * @param {File} file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
 * @param {string} model ID of the model to use. Only `whisper-1` is currently available.
 * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
 * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
 * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .createTranslation(file, model, prompt, responseFormat, temperature, options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Delete a file.
 * @param {string} fileId The ID of the file to use for this request
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public deleteFile(fileId: string, options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .deleteFile(fileId, options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
 * @param {string} model The model to delete
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public deleteModel(model: string, options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .deleteModel(model, options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Returns the contents of the specified file
 * @param {string} fileId The ID of the file to use for this request
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public downloadFile(fileId: string, options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .downloadFile(fileId, options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public listEngines(options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .listEngines(options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Returns a list of files that belong to the user's organization.
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public listFiles(options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .listFiles(options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Get fine-grained status updates for a fine-tune job.
 * @param {string} fineTuneId The ID of the fine-tune job to get events for.
 * @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a `data: [DONE]` message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public listFineTuneEvents(fineTuneId: string, stream?: boolean, options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .listFineTuneEvents(fineTuneId, stream, options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary List your organization's fine-tuning jobs
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public listFineTunes(options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .listFineTunes(options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public listModels(options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .listModels(options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
 * @param {string} engineId The ID of the engine to use for this request
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public retrieveEngine(engineId: string, options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .retrieveEngine(engineId, options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Returns information about a specific file.
 * @param {string} fileId The ID of the file to use for this request
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public retrieveFile(fileId: string, options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .retrieveFile(fileId, options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
 * @param {string} fineTuneId The ID of the fine-tune job
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public retrieveFineTune(fineTuneId: string, options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .retrieveFineTune(fineTuneId, options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}
/**
 *
 * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
 * @param {string} model The ID of the model to use for this request
 * @param {*} [options] Override http request option.
 * @throws {RequiredError}
 * @memberof OpenAIApi
 */
public retrieveModel(model: string, options?: AxiosRequestConfig) {
    // Build the request via the functional API factory, then execute it
    // against this instance's axios client and base path.
    const localVarFp = OpenAIApiFp(this.configuration);
    return localVarFp
        .retrieveModel(model, options)
        .then((requestFn) => requestFn(this.axios, this.basePath));
}