|
227 | 227 | } |
228 | 228 | } |
229 | 229 | }, |
230 | | - "tags": ["Chat"] |
| 230 | + "tags": [ |
| 231 | + "Chat" |
| 232 | + ] |
231 | 233 | } |
232 | 234 | }, |
233 | 235 | "/v1/models/pull": { |
|
664 | 666 | } |
665 | 667 | } |
666 | 668 | }, |
667 | | - "tags": ["Models"] |
| 669 | + "tags": [ |
| 670 | + "Models" |
| 671 | + ] |
668 | 672 | } |
669 | 673 | }, |
670 | 674 | "/v1/threads": { |
|
2235 | 2239 | "user": { |
2236 | 2240 | "type": "string", |
2237 | 2241 | "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. We are actively working on this feature to bring cortex as fully OpenAI compatible platform. Planning and roadmap for this feature can be found [**here**](https://github.com/janhq/cortex.cpp/issues/1582)." |
| 2242 | + }, |
| 2243 | + "dynatemp_range": { |
| 2244 | + "type": "number", |
| 2245 | + "description": "Dynamic temperature range. This parameter is only supported by the `llama-cpp` engine." |
| 2246 | + }, |
| 2247 | + "dynatemp_exponent": { |
| 2248 | + "type": "number", |
| 2249 | + "description": "Dynamic temperature exponent. This parameter is only supported by the `llama-cpp` engine." |
| 2250 | + }, |
| 2251 | + "top_k": { |
| 2252 | + "type": "integer", |
| 2253 | + "description": "The number of most likely tokens to consider at each step. This parameter is only supported by the `llama-cpp` engine." |
| 2254 | + }, |
| 2255 | + "min_p": { |
| 2256 | + "type": "number", |
| 2257 | + "description": "Minimum probability threshold for token sampling. This parameter is only supported by the `llama-cpp` engine." |
| 2258 | + }, |
| 2259 | + "tfs_z": { |
| 2260 | + "type": "number", |
| 2261 | + "description": "The z parameter used for tail-free sampling. This parameter is only supported by the `llama-cpp` engine." |
| 2262 | + }, |
| 2263 | + "typ_p": { |
| 2264 | + "type": "number", |
| 2265 | + "description": "The cumulative probability threshold used for typical token sampling. This parameter is only supported by the `llama-cpp` engine." |
| 2266 | + }, |
| 2267 | + "repeat_last_n": { |
| 2268 | + "type": "integer", |
| 2269 | + "description": "Number of previous tokens to penalize for repeating. This parameter is only supported by the `llama-cpp` engine." |
| 2270 | + }, |
| 2271 | + "repeat_penalty": { |
| 2272 | + "type": "number", |
| 2273 | + "description": "Penalty for repeating tokens. This parameter is only supported by the `llama-cpp` engine." |
| 2274 | + }, |
| 2275 | + "mirostat": { |
| 2276 | + "type": "boolean", |
| 2277 | + "description": "Enables or disables Mirostat sampling (true or false). This parameter is only supported by the `llama-cpp` engine." |
| 2278 | + }, |
| 2279 | + "mirostat_tau": { |
| 2280 | + "type": "number", |
| 2281 | + "description": "Target entropy value for Mirostat sampling. This parameter is only supported by the `llama-cpp` engine." |
| 2282 | + }, |
| 2283 | + "mirostat_eta": { |
| 2284 | + "type": "number", |
| 2285 | + "description": "Learning rate for Mirostat sampling. This parameter is only supported by the `llama-cpp` engine." |
| 2286 | + }, |
| 2287 | + "penalize_nl": { |
| 2288 | + "type": "boolean", |
| 2289 | + "description": "Penalizes newline tokens (true or false). This parameter is only supported by the `llama-cpp` engine." |
| 2290 | + }, |
| 2291 | + "ignore_eos": { |
| 2292 | + "type": "boolean", |
| 2293 | + "description": "Ignores the end-of-sequence token (true or false). This parameter is only supported by the `llama-cpp` engine." |
| 2294 | + }, |
| 2295 | + "n_probs": { |
| 2296 | + "type": "integer", |
| 2297 | + "description": "Number of token probabilities to return. This parameter is only supported by the `llama-cpp` engine." |
| 2298 | + }, |
| 2299 | + "min_keep": { |
| 2300 | + "type": "integer", |
| 2301 | + "description": "Minimum number of tokens to keep. This parameter is only supported by the `llama-cpp` engine." |
2238 | 2302 | } |
2239 | 2303 | }, |
2240 | 2304 | "required": [ |
|
3189 | 3253 | "description": "The display name of the model." |
3190 | 3254 | } |
3191 | 3255 | }, |
3192 | | - "required": ["model", "modelPath"] |
| 3256 | + "required": [ |
| 3257 | + "model", |
| 3258 | + "modelPath" |
| 3259 | + ] |
3193 | 3260 | }, |
3194 | 3261 | "ImportModelResponse": { |
3195 | 3262 | "type": "object", |
|
3208 | 3275 | "example": "OK" |
3209 | 3276 | } |
3210 | 3277 | }, |
3211 | | - "required": ["message", "modelHandle", "result"] |
| 3278 | + "required": [ |
| 3279 | + "message", |
| 3280 | + "modelHandle", |
| 3281 | + "result" |
| 3282 | + ] |
3212 | 3283 | }, |
3213 | 3284 | "CommonResponseDto": { |
3214 | 3285 | "type": "object", |
|
0 commit comments