<?xml version="1.0" encoding="utf-8" ?>
<Suite Title="OpenAI" Id="OpenAI" Category="AI" RequireVersion="9.7.1" SourceUrl="https://github.com/nielsbosma/SeoTools-for-Excel-Connectors/blob/master/OpenAI.xml" HelpUrl="http://seotoolsforexcel.com/openai/" HelpText="Documentation">
<Author Name="Niels Bosma" Url="https://se.linkedin.com/in/bosmaniels"/>
<Settings HelpText="What's this?" HelpUrl="http://seotoolsforexcel.com/openai/">
<Text Id="ApiKey" Title="API Key" Required="true"/>
</Settings>
<Resources>
<Resource Id="HttpProps">
<Cache>true</Cache>
<CacheKeyComponents>Url;RequestBody</CacheKeyComponents>
<RequestContentType>application/json</RequestContentType>
<RequestHeaders>
<Header Name='Authorization'>Bearer @(Model.ApiKey)</Header>
</RequestHeaders>
</Resource>
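<!-- Shared HTTP settings: responses are cached per URL and request body, payloads are sent as JSON, and the API key from Settings is passed as a Bearer token. -->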
<Resource Id="Fail">
<Fail>
<JsonPath Expr="$.error.message"/>
</Fail>
</Resource>
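<!-- OpenAI reports failures as {"error": {"message": "...", "type": "...", ...}}; the JsonPath above surfaces the human-readable message as the connector's error text. -->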
</Resources>
<RestConnector Id="Completions" Title="Completions" HelpUrl="https://beta.openai.com/docs/api-reference/completions/create">
<Parameters>
<Text Id="Prompt" Debug.DefaultValue="mimetype of filetype bmp" Required="true" Multiline="true"/>
<Text Id="PrependPrompt" Title="Prepend Prompt" Multiline="true" HelpText="Added before the prompt. Useful with formulas."/>
<Text Id="AppendPrompt" Title="Append Prompt" Multiline="true" HelpText="Added after the prompt. Useful with formulas."/>
<Select Id="Model" DefaultValue="text-davinci-003" Required="true">
<DataSource>
<Item Id="text-davinci-003"/>
<Item Id="text-curie-001"/>
<Item Id="text-babbage-001"/>
<Item Id="text-ada-001"/>
<Item Id="code-davinci-002"/>
<Item Id="code-cushman-001"/>
</DataSource>
</Select>
<Number Id="Temperature" Minimum="0" DefaultValue="0" Maximum="1" Format="N1" HelpText="Controls randomness: Lowering results in fewer random completions. As the temperature approaches zero, the model will become deterministic and repetitive."/>
<Number Id="MaxTokens" Title="Maximum Length" Minimum="1" DefaultValue="64" Maximum="4000" HelpText="The maximum number of tokens to generate. Requests can use up to 2,048 or 4,000 tokens shared between prompt and completion. The exact limit varies by model. (One token is roughly 4 characters for normal English text.)"/>
<Text Id="StopSequence" Title="Stop Sequence" HelpText="Up to four sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence."/>
<Number Id="TopP" Title="Top P" Minimum="0" DefaultValue="1" Maximum="1" Format="N1" HelpText="Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered."/>
<Number Id="Frequency" Minimum="0" DefaultValue="0" Maximum="2" Format="N1" HelpText="How much to penalize new tokens based on their existing frequency in the text so far. Decreases the model's likelihood to repeat the same line verbatim."/>
<Number Id="PresencePenalty" Title="Presence Penalty" Minimum="0" DefaultValue="0" Maximum="2" Format="N1" HelpText="How much to penalize new tokens based on whether they appear in the text so far. Increases the model's likelihood to talk about new topics."/>
<Number Id="BestOf" Title="Best Of" Minimum="1" DefaultValue="1" Maximum="20" HelpText="Generates multiple completions server-side, and displays only the best. Streaming only works when set to 1. Since it acts as a multiplier on the number of completions, this parameter can eat into your token quota very quickly — use caution!"/>
<Number Id="Delay" Title="Delay Between Requests (ms)" DefaultValue="1000" Minimum="0" Maximum="1000000" Format="N0" HelpText="The delay between requests in milliseconds. This is useful when you want to avoid hitting the API rate limit." HelpUrl="https://help.openai.com/en/articles/5955598-is-api-usage-subject-to-any-rate-limits"/>
</Parameters>
<Fetch>
<HttpSettings>
<Resource Id="HttpProps"/>
<RequestMethod>POST</RequestMethod>
<IntervalBetweenRequests RandomFrom="@(Model.Delay)" RandomTo="@(Model.Delay)" IfSame="Host"/>
<RequestBody>
<![CDATA[
@{
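// Build the JSON body for POST /v1/completions.
// Numeric parameters arrive as doubles, so integer-valued fields are cast to int below.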
var dict = new Dictionary<string, object>();
dict.Add("model", Model.Model);
dict.Add("prompt", (Model.PrependPrompt + Model.Prompt + Model.AppendPrompt).Trim());
dict.Add("temperature", Model.Temperature);
dict.Add("max_tokens", (int)Model.MaxTokens);
dict.Add("top_p", Model.TopP);
dict.Add("frequency_penalty", Model.Frequency);
dict.Add("presence_penalty", Model.PresencePenalty);
dict.Add("best_of", (int)Model.BestOf);
if(!string.IsNullOrEmpty(Model.StopSequence))
{
dict.Add("stop", new[] { Model.StopSequence });
}
}
@JsonConvert.SerializeObject(dict)
]]>
</RequestBody>
</HttpSettings>
<Fetch.Url>
<![CDATA[
https://api.openai.com/v1/completions
]]>
</Fetch.Url>
</Fetch>
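<!-- With default parameter values, the template above produces a body roughly like:
{"model":"text-davinci-003","prompt":"...","temperature":0.0,"max_tokens":64,"top_p":1.0,"frequency_penalty":0.0,"presence_penalty":0.0,"best_of":1} -->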
<Parse>
<JsonPath Expr="$.choices[0].text" Id="Text"/>
</Parse>
<Resource Id="Fail"/>
</RestConnector>
<RestConnector Id="Chat" Title="Chat" HelpUrl="https://platform.openai.com/docs/api-reference/chat/create">
<Parameters>
<Text Id="Message" Debug.DefaultValue="mimetype of filetype bmp" Required="true" Multiline="true"/>
<Text Id="Functions" Multiline="true"/>
<Text Id="PrependMessage" Title="Prepend Message" Multiline="true" HelpText="Added before the prompt. Useful with formulas."/>
<Text Id="AppendMessage" Title="Append Message" Multiline="true" HelpText="Added after the prompt. Useful with formulas."/>
<Select Id="Model" DefaultValue="gpt-3.5-turbo-0301" Required="true">
<DataSource>
<Item Id="gpt-3.5-turbo"/>
<Item Id="gpt-3.5-turbo-0301"/>
<Item Id="gpt-3.5-turbo-0613"/>
<Item Id="gpt-3.5-turbo-16k"/>
<Item Id="gpt-4"/>
<Item Id="gpt-4-32k"/>
</DataSource>
</Select>
<Number Id="Temperature" Minimum="0" DefaultValue="0" Maximum="1" Format="N1" HelpText="Controls randomness: Lowering results in fewer random completions. As the temperature approaches zero, the model will become deterministic and repetitive."/>
<Number Id="MaxTokens" Title="Maximum Length" Minimum="1" DefaultValue="64" Maximum="4000" HelpText="The maximum number of tokens to generate. The exact limit varies by model. (One token is roughly 4 characters for normal English text.)"/>
<Text Id="StopSequence" Title="Stop Sequence" HelpText="Sequence where the API will stop generating further tokens. The returned text will not contain the stop sequence."/>
<Number Id="TopP" Title="Top P" Minimum="0" DefaultValue="1" Maximum="1" Format="N1" HelpText="Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered."/>
<Number Id="Frequency" Minimum="0" DefaultValue="0" Maximum="2" Format="N1" HelpText="How much to penalize new tokens based on their existing frequency in the text so far. Decreases the model's likelihood to repeat the same line verbatim."/>
<Number Id="PresencePenalty" Title="Presence Penalty" Minimum="0" DefaultValue="0" Maximum="2" Format="N1" HelpText="How much to penalize new tokens based on whether they appear in the text so far. Increases the model's likelihood to talk about new topics."/>
<Number Id="Delay" Title="Delay Between Requests (ms)" DefaultValue="1000" Minimum="0" Maximum="1000000" Format="N0" HelpText="The delay between requests in milliseconds. This is useful when you want to avoid hitting the API rate limit." HelpUrl="https://help.openai.com/en/articles/5955598-is-api-usage-subject-to-any-rate-limits"/>
</Parameters>
<Fetch>
<HttpSettings>
<Resource Id="HttpProps"/>
<RequestMethod>POST</RequestMethod>
<IntervalBetweenRequests RandomFrom="@(Model.Delay)" RandomTo="@(Model.Delay)" IfSame="Host"/>
<RequestBody>
<![CDATA[
@{
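// Build the JSON body for POST /v1/chat/completions.
// The single user message is assembled from the prepend/append fields around the main message.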
var dict = new Dictionary<string, object>();
dict.Add("model", Model.Model);
dict.Add("temperature", Model.Temperature);
dict.Add("max_tokens", (int)Model.MaxTokens);
dict.Add("top_p", Model.TopP);
dict.Add("frequency_penalty", Model.Frequency);
dict.Add("presence_penalty", Model.PresencePenalty);
var messages = new[]
{
new { role = "user", content = (Model.PrependMessage + Model.Message + Model.AppendMessage).Trim() }
};
dict.Add("messages", messages);
if(!string.IsNullOrEmpty(Model.Functions))
{
// Deserialize the user-supplied JSON so "functions" serializes as an array, not as an escaped string.
dict.Add("functions", JsonConvert.DeserializeObject(Model.Functions.Trim()));
}
if(!string.IsNullOrEmpty(Model.StopSequence))
{
dict.Add("stop", new[] { Model.StopSequence });
}
}
@JsonConvert.SerializeObject(dict)
]]>
</RequestBody>
</HttpSettings>
<Fetch.Url>
<![CDATA[
https://api.openai.com/v1/chat/completions
]]>
</Fetch.Url>
</Fetch>
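<!-- With default parameter values, the template above produces a body roughly like:
{"model":"gpt-3.5-turbo-0301","temperature":0.0,"max_tokens":64,"top_p":1.0,"frequency_penalty":0.0,"presence_penalty":0.0,"messages":[{"role":"user","content":"..."}]} -->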
<Parse>
<JsonPath Expr="$.choices[0].message.content" Id="Content"/>
<JsonPath Expr="$.choices[0].message.function_call.arguments" Id="FunctionCall" Title="Function Call" Checked="false"/>
</Parse>
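<!-- When the model elects to call a function, the reply carries message.function_call instead of plain content; the optional FunctionCall field exposes its JSON arguments. -->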
<Resource Id="Fail"/>
</RestConnector>
<RestConnector Id="Models" Title="Models" HelpUrl="https://platform.openai.com/docs/api-reference/models/list" Hidden="true">
<Fetch>
<HttpSettings>
<Resource Id="HttpProps"/>
</HttpSettings>
<Fetch.Url>
<![CDATA[
https://api.openai.com/v1/models
]]>
</Fetch.Url>
</Fetch>
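<!-- The list endpoint returns {"object": "list", "data": [{"id": "...", ...}]}; each model id becomes one row. -->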
<Parse>
<JsonPath Expr="$.choices[0].message.content" Id="Text"/>
</Parse>
<Resource Id="Fail"/>
</RestConnector>
</Suite>