diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 43fd5a7..763462f 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -9,7 +9,9 @@ "postCreateCommand": "yarn install", "customizations": { "vscode": { - "extensions": ["esbenp.prettier-vscode"] + "extensions": [ + "esbenp.prettier-vscode" + ] } } } diff --git a/.eslintrc.js b/.eslintrc.js new file mode 100644 index 0000000..60f0e7a --- /dev/null +++ b/.eslintrc.js @@ -0,0 +1,10 @@ +module.exports = { + parser: '@typescript-eslint/parser', + plugins: ['@typescript-eslint', 'unused-imports', 'prettier'], + rules: { + 'no-unused-vars': 'off', + 'prettier/prettier': 'error', + 'unused-imports/no-unused-imports': 'error', + }, + root: true, +}; diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3eb82ab..e9d246f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,14 +16,14 @@ jobs: lint: timeout-minutes: 10 name: lint - runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-typescript' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 - name: Set up Node uses: actions/setup-node@v4 with: - node-version: '20' + node-version: '18' - name: Bootstrap run: ./scripts/bootstrap @@ -34,7 +34,7 @@ jobs: build: timeout-minutes: 5 name: build - runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-typescript' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} permissions: contents: read id-token: write @@ -44,7 +44,7 @@ jobs: - name: Set up Node uses: actions/setup-node@v4 with: - node-version: '20' + node-version: '18' - name: Bootstrap run: ./scripts/bootstrap @@ -53,14 +53,14 @@ jobs: run: ./scripts/build - name: Get GitHub 
OIDC Token - if: github.repository == 'stainless-sdks/llama-stack-client-typescript' + if: github.repository == 'stainless-sdks/llama-stack-client-node' id: github-oidc uses: actions/github-script@v6 with: script: core.setOutput('github_token', await core.getIDToken()); - name: Upload tarball - if: github.repository == 'stainless-sdks/llama-stack-client-typescript' + if: github.repository == 'stainless-sdks/llama-stack-client-node' env: URL: https://pkg.stainless.com/s AUTH: ${{ steps.github-oidc.outputs.github_token }} @@ -69,7 +69,7 @@ jobs: test: timeout-minutes: 10 name: test - runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-typescript' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index eb32244..625635a 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -18,3 +18,4 @@ jobs: run: | bash ./bin/check-release-environment env: + diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d7a8735..c5e8a3e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.1" + ".": "0.1.0-alpha.2" } diff --git a/.stats.yml b/.stats.yml index 32b2676..fe4493c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 91 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-0e756984d87c3fd1eb96d486947b3bc2086d5afcf299e8119b6b89bbd86dbe75.yml -openapi_spec_hash: 7c519a25bb9a094d4b4bda17bb20dd88 -config_hash: d1f21dfdbf5d9925eecf56b6c1fab755 +configured_endpoints: 105 +openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-df7a19394e9124c18ec4e888e2856d22b5ebfd6fe6fe6e929ff6cfadb2ae7e2a.yml +openapi_spec_hash: 9428682672fdd7e2afee7af9ef849dc9 +config_hash: e1d37a77a6e8ca86fb6bccb4b0f172c9 diff --git a/CHANGELOG.md b/CHANGELOG.md index c6b43f2..6fb8d8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Changelog +## 0.1.0-alpha.2 (2025-06-27) + +Full Changelog: [v0.1.0-alpha.1...v0.1.0-alpha.2](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.1.0-alpha.1...v0.1.0-alpha.2) + +### Features + +* **api:** update via SDK Studio ([a00f961](https://github.com/llamastack/llama-stack-client-typescript/commit/a00f961a3a4a8961cd54ad6a92a52aa34cb0d041)) +* **api:** update via SDK Studio ([bef1e47](https://github.com/llamastack/llama-stack-client-typescript/commit/bef1e47ad9fe9a03e8ffdaa632981c0666919b73)) +* **api:** update via SDK Studio ([7fb44fa](https://github.com/llamastack/llama-stack-client-typescript/commit/7fb44fab41cd95410115d12a7855fd12fbd3b34c)) + ## 0.1.0-alpha.1 (2025-06-27) Full Changelog: [v0.0.1-alpha.0...v0.1.0-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.0.1-alpha.0...v0.1.0-alpha.1) diff --git a/README.md b/README.md index 3a9c734..a27b8c1 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ -# Llama Stack Client TypeScript API Library +# Llama Stack Client Node API Library -[![NPM version]()](https://npmjs.org/package/llama-stack-client) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/llama-stack-client) +[![NPM version](https://img.shields.io/npm/v/llama-stack-client.svg)](https://npmjs.org/package/llama-stack-client) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/llama-stack-client) This library provides convenient access to the Llama Stack Client REST API from server-side TypeScript or JavaScript. -The full API of this library can be found in [api.md](api.md). 
+The REST API documentation can be found on [llama-stack.readthedocs.io](https://llama-stack.readthedocs.io/en/latest/). The full API of this library can be found in [api.md](api.md). It is generated with [Stainless](https://www.stainless.com/). @@ -25,13 +25,35 @@ The full API of this library can be found in [api.md](api.md). ```js import LlamaStackClient from 'llama-stack-client'; -const client = new LlamaStackClient({ - apiKey: process.env['LLAMA_STACK_CLIENT_API_KEY'], // This is the default and can be omitted -}); +const client = new LlamaStackClient(); + +const model = await client.models.register({ model_id: 'model_id' }); -await client.datasetio.appendRows('REPLACE_ME', { rows: [{ foo: true }] }); +console.log(model.identifier); ``` +## Streaming responses + +We provide support for streaming responses using Server Sent Events (SSE). + +```ts +import LlamaStackClient from 'llama-stack-client'; + +const client = new LlamaStackClient(); + +const stream = await client.inference.chatCompletion({ + messages: [{ content: 'string', role: 'user' }], + model_id: 'model_id', + stream: true, +}); +for await (const chatCompletionResponseStreamChunk of stream) { + console.log(chatCompletionResponseStreamChunk.completion_message); +} +``` + +If you need to cancel a stream, you can `break` from the loop +or call `stream.controller.abort()`. + ### Request & Response types This library includes TypeScript definitions for all request params and response fields. 
You may import and use them like so: @@ -40,16 +62,49 @@ This library includes TypeScript definitions for all request params and response ```ts import LlamaStackClient from 'llama-stack-client'; -const client = new LlamaStackClient({ - apiKey: process.env['LLAMA_STACK_CLIENT_API_KEY'], // This is the default and can be omitted -}); +const client = new LlamaStackClient(); -const params: LlamaStackClient.DatasetioAppendRowsParams = { rows: [{ foo: true }] }; -await client.datasetio.appendRows('REPLACE_ME', params); +const params: LlamaStackClient.InferenceChatCompletionParams = { + messages: [{ content: 'string', role: 'user' }], + model_id: 'model_id', +}; +const chatCompletionResponse: LlamaStackClient.ChatCompletionResponse = await client.inference.chatCompletion( + params, +); ``` Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. +## File uploads + +Request parameters that correspond to file uploads can be passed in many different forms: + +- `File` (or an object with the same structure) +- a `fetch` `Response` (or an object with the same structure) +- an `fs.ReadStream` +- the return value of our `toFile` helper + +```ts +import fs from 'fs'; +import fetch from 'node-fetch'; +import LlamaStackClient, { toFile } from 'llama-stack-client'; + +const client = new LlamaStackClient(); + +// If you have access to Node `fs` we recommend using `fs.createReadStream()`: +await client.files.create({ file: fs.createReadStream('/path/to/file'), purpose: 'assistants' }); + +// Or if you have the web `File` API you can pass a `File` instance: +await client.files.create({ file: new File(['my bytes'], 'file'), purpose: 'assistants' }); + +// You can also pass a `fetch` `Response`: +await client.files.create({ file: await fetch('https://somesite/file'), purpose: 'assistants' }); + +// Finally, if none of the above are convenient, you can use our `toFile` helper: +await client.files.create({ 
file: await toFile(Buffer.from('my bytes'), 'file'), purpose: 'assistants' }); +await client.files.create({ file: await toFile(new Uint8Array([0, 1, 2]), 'file'), purpose: 'assistants' }); +``` + ## Handling errors When the library is unable to connect to the API, @@ -58,8 +113,8 @@ a subclass of `APIError` will be thrown: ```ts -const response = await client.datasetio - .appendRows('REPLACE_ME', { rows: [{ foo: true }] }) +const chatCompletionResponse = await client.inference + .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }) .catch(async (err) => { if (err instanceof LlamaStackClient.APIError) { console.log(err.status); // 400 @@ -100,7 +155,7 @@ const client = new LlamaStackClient({ }); // Or, configure per-request: -await client.datasetio.appendRows('REPLACE_ME', { rows: [{ foo: true }] }, { +await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, { maxRetries: 5, }); ``` @@ -117,7 +172,7 @@ const client = new LlamaStackClient({ }); // Override per-request: -await client.datasetio.appendRows('REPLACE_ME', { rows: [{ foo: true }] }, { +await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, { timeout: 5 * 1000, }); ``` @@ -131,77 +186,24 @@ Note that requests which time out will be [retried twice by default](#retries). ### Accessing raw Response data (e.g., headers) The "raw" `Response` returned by `fetch()` can be accessed through the `.asResponse()` method on the `APIPromise` type that all methods return. -This method returns as soon as the headers for a successful response are received and does not consume the response body, so you are free to write custom parsing or streaming logic. You can also use the `.withResponse()` method to get the raw `Response` along with the parsed data. -Unlike `.asResponse()` this method consumes the body, returning once it is parsed. 
```ts const client = new LlamaStackClient(); -const response = await client.datasetio.appendRows('REPLACE_ME', { rows: [{ foo: true }] }).asResponse(); +const response = await client.inference + .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }) + .asResponse(); console.log(response.headers.get('X-My-Header')); console.log(response.statusText); // access the underlying Response object -const { data: result, response: raw } = await client.datasetio - .appendRows('REPLACE_ME', { rows: [{ foo: true }] }) +const { data: chatCompletionResponse, response: raw } = await client.inference + .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }) .withResponse(); console.log(raw.headers.get('X-My-Header')); -console.log(result); -``` - -### Logging - -> [!IMPORTANT] -> All log messages are intended for debugging only. The format and content of log messages -> may change between releases. - -#### Log levels - -The log level can be configured in two ways: - -1. Via the `LLAMA_STACK_CLIENT_LOG` environment variable -2. Using the `logLevel` client option (overrides the environment variable if set) - -```ts -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - logLevel: 'debug', // Show all log messages -}); -``` - -Available log levels, from most to least verbose: - -- `'debug'` - Show debug messages, info, warnings, and errors -- `'info'` - Show info messages, warnings, and errors -- `'warn'` - Show warnings and errors (default) -- `'error'` - Show only errors -- `'off'` - Disable all logging - -At the `'debug'` level, all HTTP requests and responses are logged, including headers and bodies. -Some authentication-related headers are redacted, but sensitive data in request and response bodies -may still be visible. - -#### Custom logger - -By default, this library logs to `globalThis.console`. You can also provide a custom logger. 
-Most logging libraries are supported, including [pino](https://www.npmjs.com/package/pino), [winston](https://www.npmjs.com/package/winston), [bunyan](https://www.npmjs.com/package/bunyan), [consola](https://www.npmjs.com/package/consola), [signale](https://www.npmjs.com/package/signale), and [@std/log](https://jsr.io/@std/log). If your logger doesn't work, please open an issue. - -When providing a custom logger, the `logLevel` option still controls which messages are emitted, messages -below the configured level will not be sent to your logger. - -```ts -import LlamaStackClient from 'llama-stack-client'; -import pino from 'pino'; - -const logger = pino(); - -const client = new LlamaStackClient({ - logger: logger.child({ name: 'LlamaStackClient' }), - logLevel: 'debug', // Send all messages to pino, allowing it to filter -}); +console.log(chatCompletionResponse.completion_message); ``` ### Making custom/undocumented requests @@ -228,8 +230,9 @@ parameter. This library doesn't validate at runtime that the request matches the send will be sent as-is. ```ts -client.datasetio.appendRows({ - // ... +client.foo.create({ + foo: 'my_param', + bar: 12, // @ts-expect-error baz is not yet public baz: 'undocumented option', }); @@ -249,85 +252,69 @@ validate or strip extra properties from the response from the API. ### Customizing the fetch client -By default, this library expects a global `fetch` function is defined. - -If you want to use a different `fetch` function, you can either polyfill the global: +By default, this library uses `node-fetch` in Node, and expects a global `fetch` function in other environments. 
-```ts -import fetch from 'my-fetch'; - -globalThis.fetch = fetch; -``` - -Or pass it to the client: +If you would prefer to use a global, web-standards-compliant `fetch` function even in a Node environment, +(for example, if you are running Node with `--experimental-fetch` or using NextJS which polyfills with `undici`), +add the following import before your first import `from "LlamaStackClient"`: ```ts +// Tell TypeScript and the package to use the global web fetch instead of node-fetch. +// Note, despite the name, this does not add any polyfills, but expects them to be provided if needed. +import 'llama-stack-client/shims/web'; import LlamaStackClient from 'llama-stack-client'; -import fetch from 'my-fetch'; - -const client = new LlamaStackClient({ fetch }); ``` -### Fetch options +To do the inverse, add `import "llama-stack-client/shims/node"` (which does import polyfills). +This can also be useful if you are getting the wrong TypeScript types for `Response` ([more details](https://github.com/llamastack/llama-stack-client-typescript/tree/main/src/_shims#readme)). -If you want to set custom `fetch` options without overriding the `fetch` function, you can provide a `fetchOptions` object when instantiating the client or making a request. (Request-specific options override client options.) 
+### Logging and middleware + +You may also provide a custom `fetch` function when instantiating the client, +which can be used to inspect or alter the `Request` or `Response` before/after each request: ```ts +import { fetch } from 'undici'; // as one example import LlamaStackClient from 'llama-stack-client'; const client = new LlamaStackClient({ - fetchOptions: { - // `RequestInit` options + fetch: async (url: RequestInfo, init?: RequestInit): Promise => { + console.log('About to make a request', url, init); + const response = await fetch(url, init); + console.log('Got response', response); + return response; }, }); ``` -#### Configuring proxies +Note that if given a `DEBUG=true` environment variable, this library will log all requests and responses automatically. +This is intended for debugging purposes only and may change in the future without notice. -To modify proxy behavior, you can provide custom `fetchOptions` that add runtime-specific proxy -options to requests: +### Configuring an HTTP(S) Agent (e.g., for proxies) - **Node** [[docs](https://github.com/nodejs/undici/blob/main/docs/docs/api/ProxyAgent.md#example---proxyagent-with-fetch)] +By default, this library uses a stable agent for all http/https requests to reuse TCP connections, eliminating many TCP & TLS handshakes and shaving around 100ms off most requests. 
-```ts -import LlamaStackClient from 'llama-stack-client'; -import * as undici from 'undici'; - -const proxyAgent = new undici.ProxyAgent('http://localhost:8888'); -const client = new LlamaStackClient({ - fetchOptions: { - dispatcher: proxyAgent, - }, -}); -``` - - **Bun** [[docs](https://bun.sh/guides/http/proxy)] +If you would like to disable or customize this behavior, for example to use the API behind a proxy, you can pass an `httpAgent` which is used for all requests (be they http or https), for example: + ```ts -import LlamaStackClient from 'llama-stack-client'; +import http from 'http'; +import { HttpsProxyAgent } from 'https-proxy-agent'; +// Configure the default for all requests: const client = new LlamaStackClient({ - fetchOptions: { - proxy: 'http://localhost:8888', - }, + httpAgent: new HttpsProxyAgent(process.env.PROXY_URL), }); -``` - - **Deno** [[docs](https://docs.deno.com/api/deno/~/Deno.createHttpClient)] - -```ts -import LlamaStackClient from 'npm:llama-stack-client'; -const httpClient = Deno.createHttpClient({ proxy: { url: 'http://localhost:8888' } }); -const client = new LlamaStackClient({ - fetchOptions: { - client: httpClient, +// Override per-request: +await client.inference.chatCompletion( + { messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, + { + httpAgent: new http.Agent({ keepAlive: false }), }, -}); +); ``` -## Frequently Asked Questions - ## Semantic versioning This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: @@ -342,12 +329,12 @@ We are keen for your feedback; please open an [issue](https://www.github.com/lla ## Requirements -TypeScript >= 4.9 is supported. +TypeScript >= 4.5 is supported. The following runtimes are supported: - Web browsers (Up-to-date Chrome, Firefox, Safari, Edge, and more) -- Node.js 20 LTS or later ([non-EOL](https://endoflife.date/nodejs)) versions. 
+- Node.js 18 LTS or later ([non-EOL](https://endoflife.date/nodejs)) versions. - Deno v1.28.0 or higher. - Bun 1.0 or later. - Cloudflare Workers. diff --git a/SECURITY.md b/SECURITY.md index 52ce19c..1b5f3a4 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -18,6 +18,10 @@ before making any information public. If you encounter security issues that are not directly related to SDKs but pertain to the services or products provided by Llama Stack Client, please follow the respective company's security reporting guidelines. +### Llama Stack Client Terms and Policies + +Please contact llamastack@meta.com for any questions or concerns regarding the security of our services. + --- Thank you for helping us keep the SDKs and systems they interact with secure. diff --git a/api.md b/api.md index 158a31f..f89c31c 100644 --- a/api.md +++ b/api.md @@ -1,445 +1,465 @@ -# Datasetio +# Shared + +Types: + +- AgentConfig +- BatchCompletion +- ChatCompletionResponse +- CompletionMessage +- ContentDelta +- Document +- InterleavedContent +- InterleavedContentItem +- Message +- ParamType +- QueryConfig +- QueryGeneratorConfig +- QueryResult +- ResponseFormat +- ReturnType +- SafetyViolation +- SamplingParams +- ScoringResult +- SystemMessage +- ToolCall +- ToolCallOrString +- ToolParamDefinition +- ToolResponseMessage +- UserMessage + +# Toolgroups Types: -- DatasetioIterateRowsResponse +- ListToolGroupsResponse +- ToolGroup +- ToolgroupListResponse Methods: -- client.datasetio.appendRows(datasetID, { ...params }) -> void -- client.datasetio.iterateRows(datasetID, { ...params }) -> DatasetioIterateRowsResponse +- client.toolgroups.list() -> ToolgroupListResponse +- client.toolgroups.get(toolgroupId) -> ToolGroup +- client.toolgroups.register({ ...params }) -> void +- client.toolgroups.unregister(toolgroupId) -> void -# Inference +# Tools Types: -- ChatCompletionResponse -- CompletionMessage -- CompletionResponse -- InterleavedContent -- InterleavedContentItem -- Message -- MetricInResponse 
-- ResponseFormat -- SamplingParams -- SystemMessage -- TokenLogProbs -- ToolCall -- ToolConfig -- ToolDefinition -- InferenceBatchChatCompletionResponse -- InferenceBatchCompletionResponse -- InferenceEmbeddingsResponse +- ListToolsResponse +- Tool +- ToolListResponse Methods: -- client.inference.batchChatCompletion({ ...params }) -> InferenceBatchChatCompletionResponse -- client.inference.batchCompletion({ ...params }) -> InferenceBatchCompletionResponse -- client.inference.chatCompletion({ ...params }) -> ChatCompletionResponse -- client.inference.completion({ ...params }) -> CompletionResponse -- client.inference.embeddings({ ...params }) -> InferenceEmbeddingsResponse +- client.tools.list({ ...params }) -> ToolListResponse +- client.tools.get(toolName) -> Tool -# PostTraining +# ToolRuntime Types: -- PostTrainingJob -- TrainingConfig -- PostTrainingListJobsResponse +- ToolDef +- ToolInvocationResult +- ToolRuntimeListToolsResponse Methods: -- client.postTraining.fineTuneSupervised({ ...params }) -> PostTrainingJob -- client.postTraining.listJobs() -> PostTrainingListJobsResponse -- client.postTraining.optimizePreferences({ ...params }) -> PostTrainingJob - -## Job - -Types: +- client.toolRuntime.invokeTool({ ...params }) -> ToolInvocationResult +- client.toolRuntime.listTools({ ...params }) -> ToolRuntimeListToolsResponse -- JobRetrieveArtifactsResponse -- JobRetrieveStatusResponse +## RagTool Methods: -- client.postTraining.job.cancel({ ...params }) -> void -- client.postTraining.job.retrieveArtifacts({ ...params }) -> JobRetrieveArtifactsResponse -- client.postTraining.job.retrieveStatus({ ...params }) -> JobRetrieveStatusResponse +- client.toolRuntime.ragTool.insert({ ...params }) -> void +- client.toolRuntime.ragTool.query({ ...params }) -> QueryResult -# Agents +# Responses Types: -- Agent -- AgentConfig -- AgentCreateResponse -- AgentListResponse -- AgentListSessionsResponse +- ResponseObject +- ResponseObjectStream +- ResponseListResponse Methods: -- 
client.agents.create({ ...params }) -> AgentCreateResponse -- client.agents.retrieve(agentID) -> Agent -- client.agents.list() -> AgentListResponse -- client.agents.delete(agentID) -> void -- client.agents.listSessions(agentID) -> AgentListSessionsResponse +- client.responses.create({ ...params }) -> ResponseObject +- client.responses.retrieve(responseId) -> ResponseObject +- client.responses.list({ ...params }) -> ResponseListResponse -## Session +## InputItems Types: -- Session -- SessionCreateResponse +- InputItemListResponse Methods: -- client.agents.session.create(agentID, { ...params }) -> SessionCreateResponse -- client.agents.session.retrieve(sessionID, { ...params }) -> Session -- client.agents.session.delete(sessionID, { ...params }) -> void +- client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse -### Turn +# Agents Types: -- AgentTool -- InferenceStep -- MemoryRetrievalStep -- ShieldCallStep -- ToolExecutionStep -- ToolResponse -- ToolResponseMessage -- Turn -- UserMessage +- InferenceStep +- MemoryRetrievalStep +- ShieldCallStep +- ToolExecutionStep +- ToolResponse +- AgentCreateResponse +- AgentRetrieveResponse +- AgentListResponse Methods: -- client.agents.session.turn.create(sessionID, { ...params }) -> Turn -- client.agents.session.turn.retrieve(turnID, { ...params }) -> Turn -- client.agents.session.turn.resume(turnID, { ...params }) -> Turn +- client.agents.create({ ...params }) -> AgentCreateResponse +- client.agents.retrieve(agentId) -> AgentRetrieveResponse +- client.agents.list({ ...params }) -> AgentListResponse +- client.agents.delete(agentId) -> void -#### Step +## Session Types: -- StepRetrieveResponse +- Session +- SessionCreateResponse +- SessionListResponse Methods: -- client.agents.session.turn.step.retrieve(stepID, { ...params }) -> StepRetrieveResponse - -# OpenAI +- client.agents.session.create(agentId, { ...params }) -> SessionCreateResponse +- client.agents.session.retrieve(agentId, sessionId, { 
...params }) -> Session +- client.agents.session.list(agentId, { ...params }) -> SessionListResponse +- client.agents.session.delete(agentId, sessionId) -> void -## V1 +## Steps Types: -- ChoiceLogprobs -- TokenLogProb -- V1GenerateCompletionResponse -- V1ListModelsResponse +- StepRetrieveResponse Methods: -- client.openai.v1.generateCompletion({ ...params }) -> V1GenerateCompletionResponse -- client.openai.v1.listModels() -> V1ListModelsResponse +- client.agents.steps.retrieve(agentId, sessionId, turnId, stepId) -> StepRetrieveResponse -### Responses +## Turn Types: -- OpenAIResponse +- AgentTurnResponseStreamChunk +- Turn +- TurnResponseEvent +- TurnResponseEventPayload Methods: -- client.openai.v1.responses.create({ ...params }) -> OpenAIResponse -- client.openai.v1.responses.retrieve(id) -> OpenAIResponse +- client.agents.turn.create(agentId, sessionId, { ...params }) -> Turn +- client.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn +- client.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn -### Chat +# Datasets Types: -- ChatCompletionContentPart -- ChatCompletionToolCall -- MessageParam -- ChatGenerateCompletionResponse +- ListDatasetsResponse +- DatasetRetrieveResponse +- DatasetListResponse +- DatasetIterrowsResponse +- DatasetRegisterResponse Methods: -- client.openai.v1.chat.generateCompletion({ ...params }) -> ChatGenerateCompletionResponse +- client.datasets.retrieve(datasetId) -> DatasetRetrieveResponse +- client.datasets.list() -> DatasetListResponse +- client.datasets.appendrows(datasetId, { ...params }) -> void +- client.datasets.iterrows(datasetId, { ...params }) -> DatasetIterrowsResponse +- client.datasets.register({ ...params }) -> DatasetRegisterResponse +- client.datasets.unregister(datasetId) -> void -# Files +# Eval Types: -- File -- FileUpload -- FileListResponse -- FileListInBucketResponse +- BenchmarkConfig +- EvalCandidate +- EvaluateResponse +- Job Methods: -- client.files.retrieve(key, { ...params }) -> 
File -- client.files.list({ ...params }) -> FileListResponse -- client.files.delete(key, { ...params }) -> void -- client.files.createUploadSession({ ...params }) -> FileUpload -- client.files.listInBucket(bucket) -> FileListInBucketResponse +- client.eval.evaluateRows(benchmarkId, { ...params }) -> EvaluateResponse +- client.eval.evaluateRowsAlpha(benchmarkId, { ...params }) -> EvaluateResponse +- client.eval.runEval(benchmarkId, { ...params }) -> Job +- client.eval.runEvalAlpha(benchmarkId, { ...params }) -> Job -## Session +## Jobs Methods: -- client.files.session.retrieve(uploadID) -> FileUpload -- client.files.session.uploadContent(uploadID, { ...params }) -> File | null +- client.eval.jobs.retrieve(benchmarkId, jobId) -> EvaluateResponse +- client.eval.jobs.cancel(benchmarkId, jobId) -> void +- client.eval.jobs.status(benchmarkId, jobId) -> Job -# Eval - -## Benchmarks +# Inspect Types: -- Benchmark -- BenchmarkConfig -- EvaluateResponse -- BenchmarkListResponse +- HealthInfo +- ProviderInfo +- RouteInfo +- VersionInfo Methods: -- client.eval.benchmarks.create({ ...params }) -> void -- client.eval.benchmarks.retrieve(benchmarkID) -> Benchmark -- client.eval.benchmarks.list() -> BenchmarkListResponse -- client.eval.benchmarks.evaluate(benchmarkID, { ...params }) -> EvaluateResponse +- client.inspect.health() -> HealthInfo +- client.inspect.version() -> VersionInfo -### Jobs +# Inference Types: -- Job +- ChatCompletionResponseStreamChunk +- CompletionResponse +- EmbeddingsResponse +- TokenLogProbs +- InferenceBatchChatCompletionResponse Methods: -- client.eval.benchmarks.jobs.retrieve(jobID, { ...params }) -> Job -- client.eval.benchmarks.jobs.cancel(jobID, { ...params }) -> void -- client.eval.benchmarks.jobs.result(jobID, { ...params }) -> EvaluateResponse -- client.eval.benchmarks.jobs.run(benchmarkID, { ...params }) -> Job +- client.inference.batchChatCompletion({ ...params }) -> InferenceBatchChatCompletionResponse +- client.inference.batchCompletion({ 
...params }) -> BatchCompletion +- client.inference.chatCompletion({ ...params }) -> ChatCompletionResponse +- client.inference.completion({ ...params }) -> CompletionResponse +- client.inference.embeddings({ ...params }) -> EmbeddingsResponse -# Datasets +# Embeddings Types: -- DataSource -- Dataset -- DatasetListResponse +- CreateEmbeddingsResponse Methods: -- client.datasets.create({ ...params }) -> Dataset -- client.datasets.retrieve(datasetID) -> Dataset -- client.datasets.list() -> DatasetListResponse -- client.datasets.delete(datasetID) -> void +- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse -# Models +# Chat Types: -- Model -- ModelType -- ModelListResponse +- ChatCompletionChunk -Methods: - -- client.models.create({ ...params }) -> Model -- client.models.retrieve(modelID) -> Model -- client.models.list() -> ModelListResponse -- client.models.delete(modelID) -> void - -# ScoringFunctions +## Completions Types: -- AggregationFunctionType -- ParamType -- ScoringFn -- ScoringFnParams -- ScoringFnParamsType -- ScoringFunctionListResponse +- CompletionCreateResponse +- CompletionRetrieveResponse +- CompletionListResponse Methods: -- client.scoringFunctions.create({ ...params }) -> void -- client.scoringFunctions.retrieve(scoringFnID) -> ScoringFn -- client.scoringFunctions.list() -> ScoringFunctionListResponse +- client.chat.completions.create({ ...params }) -> CompletionCreateResponse +- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse +- client.chat.completions.list({ ...params }) -> CompletionListResponse -# Shields +# Completions Types: -- Shield -- ShieldListResponse +- CompletionCreateResponse Methods: -- client.shields.create({ ...params }) -> Shield -- client.shields.retrieve(identifier) -> Shield -- client.shields.list() -> ShieldListResponse +- client.completions.create({ ...params }) -> CompletionCreateResponse -# Telemetry +# VectorIo Types: -- EventType -- StructuredLogType +- QueryChunksResponse 
Methods: -- client.telemetry.createEvent({ ...params }) -> void +- client.vectorIo.insert({ ...params }) -> void +- client.vectorIo.query({ ...params }) -> QueryChunksResponse -## Traces +# VectorDBs Types: -- Span -- Trace -- TraceCreateResponse +- ListVectorDBsResponse +- VectorDBRetrieveResponse +- VectorDBListResponse +- VectorDBRegisterResponse Methods: -- client.telemetry.traces.create({ ...params }) -> TraceCreateResponse -- client.telemetry.traces.retrieveSpan(spanID, { ...params }) -> Span -- client.telemetry.traces.retrieveTrace(traceID) -> Trace +- client.vectorDBs.retrieve(vectorDBId) -> VectorDBRetrieveResponse +- client.vectorDBs.list() -> VectorDBListResponse +- client.vectorDBs.register({ ...params }) -> VectorDBRegisterResponse +- client.vectorDBs.unregister(vectorDBId) -> void -## Spans +# VectorStores Types: -- QueryCondition -- SpanCreateResponse -- SpanBuildTreeResponse +- ListVectorStoresResponse +- VectorStore +- VectorStoreDeleteResponse +- VectorStoreSearchResponse Methods: -- client.telemetry.spans.create({ ...params }) -> SpanCreateResponse -- client.telemetry.spans.buildTree(spanID, { ...params }) -> SpanBuildTreeResponse -- client.telemetry.spans.export({ ...params }) -> void +- client.vectorStores.create({ ...params }) -> VectorStore +- client.vectorStores.retrieve(vectorStoreId) -> VectorStore +- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore +- client.vectorStores.list({ ...params }) -> ListVectorStoresResponse +- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse +- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse -# Tools +## Files Types: -- Tool -- ToolParameter -- ToolListResponse +- VectorStoreFile +- FileListResponse +- FileDeleteResponse +- FileContentResponse Methods: -- client.tools.retrieve(toolName) -> Tool -- client.tools.list({ ...params }) -> ToolListResponse +- client.vectorStores.files.create(vectorStoreId, { ...params }) -> 
VectorStoreFile +- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile +- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile +- client.vectorStores.files.list(vectorStoreId, { ...params }) -> FileListResponse +- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse +- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse -# Toolgroups +# Models Types: -- ToolGroup -- ToolgroupListResponse +- ListModelsResponse +- Model +- ModelListResponse Methods: -- client.toolgroups.retrieve(toolgroupID) -> ToolGroup -- client.toolgroups.list() -> ToolgroupListResponse -- client.toolgroups.register({ ...params }) -> void -- client.toolgroups.unregister(toolgroupID) -> void +- client.models.retrieve(modelId) -> Model +- client.models.list() -> ModelListResponse +- client.models.register({ ...params }) -> Model +- client.models.unregister(modelId) -> void -# VectorDBs +# PostTraining Types: -- VectorDB -- VectorDBListResponse +- AlgorithmConfig +- ListPostTrainingJobsResponse +- PostTrainingJob Methods: -- client.vectorDBs.create({ ...params }) -> VectorDB -- client.vectorDBs.retrieve(vectorDBID) -> VectorDB -- client.vectorDBs.list() -> VectorDBListResponse -- client.vectorDBs.delete(vectorDBID) -> void +- client.postTraining.preferenceOptimize({ ...params }) -> PostTrainingJob +- client.postTraining.supervisedFineTune({ ...params }) -> PostTrainingJob -# Health +## Job Types: -- HealthCheckResponse +- JobListResponse +- JobArtifactsResponse +- JobStatusResponse Methods: -- client.health.check() -> HealthCheckResponse +- client.postTraining.job.list() -> Array<ListPostTrainingJobsResponse.Data> +- client.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse +- client.postTraining.job.cancel({ ...params }) -> void +- client.postTraining.job.status({ ...params }) -> JobStatusResponse -# ToolRuntime +# Providers Types: -- ToolDef -- URL -- 
ToolRuntimeInvokeToolResponse -- ToolRuntimeListToolsResponse +- ListProvidersResponse +- ProviderListResponse Methods: -- client.toolRuntime.invokeTool({ ...params }) -> ToolRuntimeInvokeToolResponse -- client.toolRuntime.listTools({ ...params }) -> ToolRuntimeListToolsResponse +- client.providers.retrieve(providerId) -> ProviderInfo +- client.providers.list() -> ProviderListResponse -## RagTool +# Routes Types: -- RagToolQueryContextResponse +- ListRoutesResponse +- RouteListResponse Methods: -- client.toolRuntime.ragTool.insertDocuments({ ...params }) -> void -- client.toolRuntime.ragTool.queryContext({ ...params }) -> RagToolQueryContextResponse +- client.routes.list() -> RouteListResponse -# VectorIo +# Safety Types: -- VectorIoQueryResponse +- RunShieldResponse Methods: -- client.vectorIo.insert({ ...params }) -> void -- client.vectorIo.query({ ...params }) -> VectorIoQueryResponse +- client.safety.runShield({ ...params }) -> RunShieldResponse -# Providers +# Shields Types: -- ProviderInfo -- ProviderListResponse +- ListShieldsResponse +- Shield +- ShieldListResponse Methods: -- client.providers.retrieve(providerID) -> ProviderInfo -- client.providers.list() -> ProviderListResponse +- client.shields.retrieve(identifier) -> Shield +- client.shields.list() -> ShieldListResponse +- client.shields.register({ ...params }) -> Shield -# Inspect +# SyntheticDataGeneration Types: -- InspectListRoutesResponse +- SyntheticDataGenerationResponse Methods: -- client.inspect.listRoutes() -> InspectListRoutesResponse +- client.syntheticDataGeneration.generate({ ...params }) -> SyntheticDataGenerationResponse -# Safety +# Telemetry Types: -- SafetyViolation -- SafetyRunShieldResponse +- Event +- QueryCondition +- QuerySpansResponse +- SpanWithStatus +- Trace +- TelemetryGetSpanResponse +- TelemetryGetSpanTreeResponse +- TelemetryQuerySpansResponse +- TelemetryQueryTracesResponse Methods: -- client.safety.runShield({ ...params }) -> SafetyRunShieldResponse +- 
client.telemetry.getSpan(traceId, spanId) -> TelemetryGetSpanResponse +- client.telemetry.getSpanTree(spanId, { ...params }) -> TelemetryGetSpanTreeResponse +- client.telemetry.getTrace(traceId) -> Trace +- client.telemetry.logEvent({ ...params }) -> void +- client.telemetry.querySpans({ ...params }) -> TelemetryQuerySpansResponse +- client.telemetry.queryTraces({ ...params }) -> TelemetryQueryTracesResponse +- client.telemetry.saveSpansToDataset({ ...params }) -> void # Scoring @@ -453,22 +473,48 @@ Methods: - client.scoring.score({ ...params }) -> ScoringScoreResponse - client.scoring.scoreBatch({ ...params }) -> ScoringScoreBatchResponse -# SyntheticDataGeneration +# ScoringFunctions Types: -- SyntheticDataGenerationGenerateResponse +- ListScoringFunctionsResponse +- ScoringFn +- ScoringFnParams +- ScoringFunctionListResponse Methods: -- client.syntheticDataGeneration.generate({ ...params }) -> SyntheticDataGenerationGenerateResponse +- client.scoringFunctions.retrieve(scoringFnId) -> ScoringFn +- client.scoringFunctions.list() -> ScoringFunctionListResponse +- client.scoringFunctions.register({ ...params }) -> void -# Version +# Benchmarks + +Types: + +- Benchmark +- ListBenchmarksResponse +- BenchmarkListResponse + +Methods: + +- client.benchmarks.retrieve(benchmarkId) -> Benchmark +- client.benchmarks.list() -> BenchmarkListResponse +- client.benchmarks.register({ ...params }) -> void + +# Files Types: -- VersionRetrieveResponse +- DeleteFileResponse +- File +- ListFilesResponse +- FileContentResponse Methods: -- client.version.retrieve() -> VersionRetrieveResponse +- client.files.create({ ...params }) -> File +- client.files.retrieve(fileId) -> File +- client.files.list({ ...params }) -> ListFilesResponse +- client.files.delete(fileId) -> DeleteFileResponse +- client.files.content(fileId) -> unknown diff --git a/eslint.config.mjs b/eslint.config.mjs deleted file mode 100644 index f5793ef..0000000 --- a/eslint.config.mjs +++ /dev/null @@ -1,42 +0,0 @@ -// 
@ts-check -import tseslint from 'typescript-eslint'; -import unusedImports from 'eslint-plugin-unused-imports'; -import prettier from 'eslint-plugin-prettier'; - -export default tseslint.config( - { - languageOptions: { - parser: tseslint.parser, - parserOptions: { sourceType: 'module' }, - }, - files: ['**/*.ts', '**/*.mts', '**/*.cts', '**/*.js', '**/*.mjs', '**/*.cjs'], - ignores: ['dist/'], - plugins: { - '@typescript-eslint': tseslint.plugin, - 'unused-imports': unusedImports, - prettier, - }, - rules: { - 'no-unused-vars': 'off', - 'prettier/prettier': 'error', - 'unused-imports/no-unused-imports': 'error', - 'no-restricted-imports': [ - 'error', - { - patterns: [ - { - regex: '^llama-stack-client(/.*)?', - message: 'Use a relative import, not a package import.', - }, - ], - }, - ], - }, - }, - { - files: ['tests/**', 'examples/**'], - rules: { - 'no-restricted-imports': 'off', - }, - }, -); diff --git a/jest.config.ts b/jest.config.ts index 3de208f..2dfe1b2 100644 --- a/jest.config.ts +++ b/jest.config.ts @@ -8,6 +8,7 @@ const config: JestConfigWithTsJest = { }, moduleNameMapper: { '^llama-stack-client$': '/src/index.ts', + '^llama-stack-client/_shims/auto/(.*)$': '/src/_shims/auto/$1-node', '^llama-stack-client/(.*)$': '/src/$1', }, modulePathIgnorePatterns: [ @@ -15,7 +16,6 @@ const config: JestConfigWithTsJest = { '/dist/', '/deno/', '/deno_tests/', - '/packages/', ], testPathIgnorePatterns: ['scripts'], }; diff --git a/package.json b/package.json index 61f2498..8254964 100644 --- a/package.json +++ b/package.json @@ -1,8 +1,8 @@ { "name": "llama-stack-client", - "version": "0.1.0-alpha.1", + "version": "0.1.0-alpha.2", "description": "The official TypeScript library for the Llama Stack Client API", - "author": "Llama Stack Client <>", + "author": "Llama Stack Client ", "types": "dist/index.d.ts", "main": "dist/index.js", "type": "commonjs", @@ -17,53 +17,107 @@ "test": "./scripts/test", "build": "./scripts/build", "prepublishOnly": "echo 'to publish, run 
yarn build && (cd dist; yarn publish)' && exit 1", - "format": "./scripts/format", + "format": "prettier --write --cache --cache-strategy metadata . !dist", "prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build && ./scripts/utils/git-swap.sh; fi", "tsn": "ts-node -r tsconfig-paths/register", "lint": "./scripts/lint", "fix": "./scripts/format" }, - "dependencies": {}, + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7" + }, "devDependencies": { - "@arethetypeswrong/cli": "^0.17.0", "@swc/core": "^1.3.102", "@swc/jest": "^0.2.29", "@types/jest": "^29.4.0", - "@types/node": "^20.17.6", - "typescript-eslint": "8.31.1", - "@typescript-eslint/eslint-plugin": "8.31.1", - "@typescript-eslint/parser": "8.31.1", - "eslint": "^9.20.1", - "eslint-plugin-prettier": "^5.4.1", - "eslint-plugin-unused-imports": "^4.1.4", + "@typescript-eslint/eslint-plugin": "^6.7.0", + "@typescript-eslint/parser": "^6.7.0", + "eslint": "^8.49.0", + "eslint-plugin-prettier": "^5.0.1", + "eslint-plugin-unused-imports": "^3.0.0", "iconv-lite": "^0.6.3", "jest": "^29.4.0", "prettier": "^3.0.0", - "publint": "^0.2.12", "ts-jest": "^29.1.0", "ts-node": "^10.5.0", - "tsc-multi": "https://github.com/stainless-api/tsc-multi/releases/download/v1.1.8/tsc-multi.tgz", + "tsc-multi": "^1.1.0", "tsconfig-paths": "^4.0.0", - "typescript": "5.8.3" + "typescript": "^4.8.2" }, + "sideEffects": [ + "./_shims/index.js", + "./_shims/index.mjs", + "./shims/node.js", + "./shims/node.mjs", + "./shims/web.js", + "./shims/web.mjs" + ], "imports": { "llama-stack-client": ".", "llama-stack-client/*": "./src/*" }, "exports": { + "./_shims/auto/*": { + "deno": { + "types": "./dist/_shims/auto/*.d.ts", + "require": "./dist/_shims/auto/*.js", + "default": "./dist/_shims/auto/*.mjs" + }, + "bun": { + "types": 
"./dist/_shims/auto/*.d.ts", + "require": "./dist/_shims/auto/*-bun.js", + "default": "./dist/_shims/auto/*-bun.mjs" + }, + "browser": { + "types": "./dist/_shims/auto/*.d.ts", + "require": "./dist/_shims/auto/*.js", + "default": "./dist/_shims/auto/*.mjs" + }, + "worker": { + "types": "./dist/_shims/auto/*.d.ts", + "require": "./dist/_shims/auto/*.js", + "default": "./dist/_shims/auto/*.mjs" + }, + "workerd": { + "types": "./dist/_shims/auto/*.d.ts", + "require": "./dist/_shims/auto/*.js", + "default": "./dist/_shims/auto/*.mjs" + }, + "node": { + "types": "./dist/_shims/auto/*-node.d.ts", + "require": "./dist/_shims/auto/*-node.js", + "default": "./dist/_shims/auto/*-node.mjs" + }, + "types": "./dist/_shims/auto/*.d.ts", + "require": "./dist/_shims/auto/*.js", + "default": "./dist/_shims/auto/*.mjs" + }, ".": { - "import": "./dist/index.mjs", - "require": "./dist/index.js" + "require": { + "types": "./dist/index.d.ts", + "default": "./dist/index.js" + }, + "types": "./dist/index.d.mts", + "default": "./dist/index.mjs" }, "./*.mjs": { + "types": "./dist/*.d.ts", "default": "./dist/*.mjs" }, "./*.js": { + "types": "./dist/*.d.ts", "default": "./dist/*.js" }, "./*": { - "import": "./dist/*.mjs", - "require": "./dist/*.js" + "types": "./dist/*.d.ts", + "require": "./dist/*.js", + "default": "./dist/*.mjs" } } } diff --git a/release-please-config.json b/release-please-config.json index 1ebd0bd..624ed99 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -60,5 +60,8 @@ } ], "release-type": "node", - "extra-files": ["src/version.ts", "README.md"] + "extra-files": [ + "src/version.ts", + "README.md" + ] } diff --git a/scripts/build b/scripts/build index 8804525..3bf4e0f 100755 --- a/scripts/build +++ b/scripts/build @@ -15,27 +15,32 @@ rm -rf dist; mkdir dist # Copy src to dist/src and build from dist/src into dist, so that # the source map for index.js.map will refer to ./src/index.ts etc cp -rp src README.md dist +rm dist/src/_shims/*-deno.ts 
dist/src/_shims/auto/*-deno.ts for file in LICENSE CHANGELOG.md; do if [ -e "${file}" ]; then cp "${file}" dist; fi done if [ -e "bin/cli" ]; then - mkdir -p dist/bin + mkdir dist/bin cp -p "bin/cli" dist/bin/; fi -if [ -e "bin/migration-config.json" ]; then - mkdir -p dist/bin - cp -p "bin/migration-config.json" dist/bin/; -fi # this converts the export map paths for the dist directory # and does a few other minor things node scripts/utils/make-dist-package-json.cjs > dist/package.json # build to .js/.mjs/.d.ts files ./node_modules/.bin/tsc-multi -# we need to patch index.js so that `new module.exports()` works for cjs backwards -# compat. No way to get that from index.ts because it would cause compile errors +# copy over handwritten .js/.mjs/.d.ts files +cp src/_shims/*.{d.ts,js,mjs,md} dist/_shims +cp src/_shims/auto/*.{d.ts,js,mjs} dist/_shims/auto +# we need to add exports = module.exports = LlamaStackClient to index.js; +# No way to get that from index.ts because it would cause compile errors # when building .mjs node scripts/utils/fix-index-exports.cjs +# with "moduleResolution": "nodenext", if ESM resolves to index.d.ts, +# it'll have TS errors on the default import. But if it resolves to +# index.d.mts the default import will work (even though both files have +# the same export default statement) +cp dist/index.d.ts dist/index.d.mts cp tsconfig.dist-src.json dist/src/tsconfig.json node scripts/utils/postprocess-files.cjs diff --git a/scripts/format b/scripts/format index 7a75640..a6bb9d0 100755 --- a/scripts/format +++ b/scripts/format @@ -5,8 +5,4 @@ set -e cd "$(dirname "$0")/.." echo "==> Running eslint --fix" -./node_modules/.bin/eslint --fix . - -echo "==> Running prettier --write" -# format things eslint didn't -./node_modules/.bin/prettier --write --cache --cache-strategy metadata . '!**/dist' '!**/*.ts' '!**/*.mts' '!**/*.cts' '!**/*.js' '!**/*.mjs' '!**/*.cjs' +ESLINT_USE_FLAT_CONFIG="false" ./node_modules/.bin/eslint --fix --ext ts,js . 
diff --git a/scripts/lint b/scripts/lint index 3ffb78a..6ba75df 100755 --- a/scripts/lint +++ b/scripts/lint @@ -5,17 +5,7 @@ set -e cd "$(dirname "$0")/.." echo "==> Running eslint" -./node_modules/.bin/eslint . +ESLINT_USE_FLAT_CONFIG="false" ./node_modules/.bin/eslint --ext ts,js . -echo "==> Building" -./scripts/build - -echo "==> Checking types" -./node_modules/typescript/bin/tsc - -echo "==> Running Are The Types Wrong?" -./node_modules/.bin/attw --pack dist -f json >.attw.json || true -node scripts/utils/attw-report.cjs - -echo "==> Running publint" -./node_modules/.bin/publint dist +echo "==> Running tsc" +./node_modules/.bin/tsc --noEmit diff --git a/scripts/utils/attw-report.cjs b/scripts/utils/attw-report.cjs deleted file mode 100644 index b3477c0..0000000 --- a/scripts/utils/attw-report.cjs +++ /dev/null @@ -1,24 +0,0 @@ -const fs = require('fs'); -const problems = Object.values(JSON.parse(fs.readFileSync('.attw.json', 'utf-8')).problems) - .flat() - .filter( - (problem) => - !( - // This is intentional, if the user specifies .mjs they get ESM. - ( - (problem.kind === 'CJSResolvesToESM' && problem.entrypoint.endsWith('.mjs')) || - // This is intentional for backwards compat reasons. - (problem.kind === 'MissingExportEquals' && problem.implementationFileName.endsWith('/index.js')) || - // this is intentional, we deliberately attempt to import types that may not exist from parent node_modules - // folders to better support various runtimes without triggering automatic type acquisition. 
- (problem.kind === 'InternalResolutionError' && problem.moduleSpecifier.includes('node_modules')) - ) - ), - ); -fs.unlinkSync('.attw.json'); -if (problems.length) { - process.stdout.write('The types are wrong!\n' + JSON.stringify(problems, null, 2) + '\n'); - process.exitCode = 1; -} else { - process.stdout.write('Types ok!\n'); -} diff --git a/scripts/utils/fix-index-exports.cjs b/scripts/utils/fix-index-exports.cjs index e5e10b3..72b0b8f 100644 --- a/scripts/utils/fix-index-exports.cjs +++ b/scripts/utils/fix-index-exports.cjs @@ -8,10 +8,7 @@ const indexJs = let before = fs.readFileSync(indexJs, 'utf8'); let after = before.replace( - /^(\s*Object\.defineProperty\s*\(exports,\s*["']__esModule["'].+)$/m, - `exports = module.exports = function (...args) { - return new exports.default(...args) - } - $1`.replace(/^ /gm, ''), + /^\s*exports\.default\s*=\s*(\w+)/m, + 'exports = module.exports = $1;\nexports.default = $1', ); fs.writeFileSync(indexJs, after, 'utf8'); diff --git a/scripts/utils/postprocess-files.cjs b/scripts/utils/postprocess-files.cjs index deae575..2451f6d 100644 --- a/scripts/utils/postprocess-files.cjs +++ b/scripts/utils/postprocess-files.cjs @@ -1,11 +1,98 @@ -// @ts-check const fs = require('fs'); const path = require('path'); +const { parse } = require('@typescript-eslint/parser'); + +const pkgImportPath = process.env['PKG_IMPORT_PATH'] ?? 'llama-stack-client/'; const distDir = process.env['DIST_PATH'] ? 
path.resolve(process.env['DIST_PATH']) : path.resolve(__dirname, '..', '..', 'dist'); +const distSrcDir = path.join(distDir, 'src'); + +/** + * Quick and dirty AST traversal + */ +function traverse(node, visitor) { + if (!node || typeof node.type !== 'string') return; + visitor.node?.(node); + visitor[node.type]?.(node); + for (const key in node) { + const value = node[key]; + if (Array.isArray(value)) { + for (const elem of value) traverse(elem, visitor); + } else if (value instanceof Object) { + traverse(value, visitor); + } + } +} + +/** + * Helper method for replacing arbitrary ranges of text in input code. + * + * The `replacer` is a function that will be called with a mini-api. For example: + * + * replaceRanges('foobar', ({ replace }) => replace([0, 3], 'baz')) // 'bazbar' + * + * The replaced ranges must not be overlapping. + */ +function replaceRanges(code, replacer) { + const replacements = []; + replacer({ replace: (range, replacement) => replacements.push({ range, replacement }) }); + + if (!replacements.length) return code; + replacements.sort((a, b) => a.range[0] - b.range[0]); + const overlapIndex = replacements.findIndex( + (r, index) => index > 0 && replacements[index - 1].range[1] > r.range[0], + ); + if (overlapIndex >= 0) { + throw new Error( + `replacements overlap: ${JSON.stringify(replacements[overlapIndex - 1])} and ${JSON.stringify( + replacements[overlapIndex], + )}`, + ); + } + + const parts = []; + let end = 0; + for (const { + range: [from, to], + replacement, + } of replacements) { + if (from > end) parts.push(code.substring(end, from)); + parts.push(replacement); + end = to; + } + if (end < code.length) parts.push(code.substring(end)); + return parts.join(''); +} + +/** + * Like calling .map(), where the iteratee is called on the path in every import or export from statement. 
+ * @returns the transformed code + */ +function mapModulePaths(code, iteratee) { + const ast = parse(code, { range: true }); + return replaceRanges(code, ({ replace }) => + traverse(ast, { + node(node) { + switch (node.type) { + case 'ImportDeclaration': + case 'ExportNamedDeclaration': + case 'ExportAllDeclaration': + case 'ImportExpression': + if (node.source) { + const { range, value } = node.source; + const transformed = iteratee(value); + if (transformed !== value) { + replace(range, JSON.stringify(transformed)); + } + } + } + }, + }), + ); +} async function* walk(dir) { for await (const d of await fs.promises.opendir(dir)) { @@ -16,79 +103,63 @@ async function* walk(dir) { } async function postprocess() { - for await (const file of walk(distDir)) { - if (!/(\.d)?[cm]?ts$/.test(file)) continue; + for await (const file of walk(path.resolve(__dirname, '..', '..', 'dist'))) { + if (!/\.([cm]?js|(\.d)?[cm]?ts)$/.test(file)) continue; const code = await fs.promises.readFile(file, 'utf8'); - // strip out lib="dom", types="node", and types="react" references; these - // are needed at build time, but would pollute the user's TS environment - const transformed = code.replace( - /^ *\/\/\/ * { + if (file.startsWith(distSrcDir)) { + if (importPath.startsWith(pkgImportPath)) { + // convert self-references in dist/src to relative paths + let relativePath = path.relative( + path.dirname(file), + path.join(distSrcDir, importPath.substring(pkgImportPath.length)), + ); + if (!relativePath.startsWith('.')) relativePath = `./${relativePath}`; + return relativePath; + } + return importPath; + } + if (importPath.startsWith('.')) { + // add explicit file extensions to relative imports + const { dir, name } = path.parse(importPath); + const ext = /\.mjs$/.test(file) ? '.mjs' : '.js'; + return `${dir}/${name}${ext}`; + } + return importPath; + }); + + if (file.startsWith(distSrcDir) && !file.endsWith('_shims/index.d.ts')) { + // strip out `unknown extends Foo ? 
never :` shim guards in dist/src + // to prevent errors from appearing in Go To Source + transformed = transformed.replace( + new RegExp('unknown extends (typeof )?\\S+ \\? \\S+ :\\s*'.replace(/\s+/, '\\s+'), 'gm'), + // replace with same number of characters to avoid breaking source maps + (match) => ' '.repeat(match.length), + ); + } + + if (file.endsWith('.d.ts')) { + // work around bad tsc behavior + // if we have `import { type Readable } from 'llama-stack-client/_shims/index'`, + // tsc sometimes replaces `Readable` with `import("stream").Readable` inline + // in the output .d.ts + transformed = transformed.replace(/import\("stream"\).Readable/g, 'Readable'); + } + + // strip out lib="dom" and types="node" references; these are needed at build time, + // but would pollute the user's TS environment + transformed = transformed.replace( + /^ *\/\/\/ * ' '.repeat(match.length - 1) + '\n', ); if (transformed !== code) { - console.error(`wrote ${path.relative(process.cwd(), file)}`); await fs.promises.writeFile(file, transformed, 'utf8'); + console.error(`wrote ${path.relative(process.cwd(), file)}`); } } - - const newExports = { - '.': { - require: { - types: './index.d.ts', - default: './index.js', - }, - types: './index.d.mts', - default: './index.mjs', - }, - }; - - for (const entry of await fs.promises.readdir(distDir, { withFileTypes: true })) { - if (entry.isDirectory() && entry.name !== 'src' && entry.name !== 'internal' && entry.name !== 'bin') { - const subpath = './' + entry.name; - newExports[subpath + '/*.mjs'] = { - default: subpath + '/*.mjs', - }; - newExports[subpath + '/*.js'] = { - default: subpath + '/*.js', - }; - newExports[subpath + '/*'] = { - import: subpath + '/*.mjs', - require: subpath + '/*.js', - }; - } else if (entry.isFile() && /\.[cm]?js$/.test(entry.name)) { - const { name, ext } = path.parse(entry.name); - const subpathWithoutExt = './' + name; - const subpath = './' + entry.name; - newExports[subpathWithoutExt] ||= { import: 
undefined, require: undefined }; - const isModule = ext[1] === 'm'; - if (isModule) { - newExports[subpathWithoutExt].import = subpath; - } else { - newExports[subpathWithoutExt].require = subpath; - } - newExports[subpath] = { - default: subpath, - }; - } - } - await fs.promises.writeFile( - 'dist/package.json', - JSON.stringify( - Object.assign( - /** @type {Record} */ ( - JSON.parse(await fs.promises.readFile('dist/package.json', 'utf-8')) - ), - { - exports: newExports, - }, - ), - null, - 2, - ), - ); } postprocess(); diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh index 55aedfa..34d5407 100755 --- a/scripts/utils/upload-artifact.sh +++ b/scripts/utils/upload-artifact.sh @@ -18,7 +18,7 @@ UPLOAD_RESPONSE=$(tar -cz dist | curl -v -X PUT \ if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then echo -e "\033[32mUploaded build to Stainless storage.\033[0m" - echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/llama-stack-client-typescript/$SHA'\033[0m" + echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/llama-stack-client-node/$SHA'\033[0m" else echo -e "\033[31mFailed to upload artifact.\033[0m" exit 1 diff --git a/src/_shims/MultipartBody.ts b/src/_shims/MultipartBody.ts new file mode 100644 index 0000000..af3b111 --- /dev/null +++ b/src/_shims/MultipartBody.ts @@ -0,0 +1,9 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +export class MultipartBody { + constructor(public body: any) {} + get [Symbol.toStringTag](): string { + return 'MultipartBody'; + } +} diff --git a/src/_shims/README.md b/src/_shims/README.md new file mode 100644 index 0000000..8061210 --- /dev/null +++ b/src/_shims/README.md @@ -0,0 +1,46 @@ +# 👋 Wondering what everything in here does? 
+ +`llama-stack-client` supports a wide variety of runtime environments like Node.js, Deno, Bun, browsers, and various +edge runtimes, as well as both CommonJS (CJS) and EcmaScript Modules (ESM). + +To do this, `llama-stack-client` provides shims for either using `node-fetch` when in Node (because `fetch` is still experimental there) or the global `fetch` API built into the environment when not in Node. + +It uses [conditional exports](https://nodejs.org/api/packages.html#conditional-exports) to +automatically select the correct shims for each environment. However, conditional exports are a fairly new +feature and not supported everywhere. For instance, the TypeScript `"moduleResolution": "node"` + +setting doesn't consult the `exports` map, compared to `"moduleResolution": "nodeNext"`, which does. +Unfortunately that's still the default setting, and it can result in errors like +getting the wrong raw `Response` type from `.asResponse()`, for example. + +The user can work around these issues by manually importing one of: + +- `import 'llama-stack-client/shims/node'` +- `import 'llama-stack-client/shims/web'` + +All of the code here in `_shims` handles selecting the automatic default shims or manual overrides. + +### How it works - Runtime + +Runtime shims get installed by calling `setShims` exported by `llama-stack-client/_shims/registry`. + +Manually importing `llama-stack-client/shims/node` or `llama-stack-client/shims/web`, calls `setShims` with the respective runtime shims. + +All client code imports shims from `llama-stack-client/_shims/index`, which: + +- checks if shims have been set manually +- if not, calls `setShims` with the shims from `llama-stack-client/_shims/auto/runtime` +- re-exports the installed shims from `llama-stack-client/_shims/registry`. + +`llama-stack-client/_shims/auto/runtime` exports web runtime shims. +If the `node` export condition is set, the export map replaces it with `llama-stack-client/_shims/auto/runtime-node`. 
+ +### How it works - Type time + +All client code imports shim types from `llama-stack-client/_shims/index`, which selects the manual types from `llama-stack-client/_shims/manual-types` if they have been declared, otherwise it exports the auto types from `llama-stack-client/_shims/auto/types`. + +`llama-stack-client/_shims/manual-types` exports an empty namespace. +Manually importing `llama-stack-client/shims/node` or `llama-stack-client/shims/web` merges declarations into this empty namespace, so they get picked up by `llama-stack-client/_shims/index`. + +`llama-stack-client/_shims/auto/types` exports web type definitions. +If the `node` export condition is set, the export map replaces it with `llama-stack-client/_shims/auto/types-node`, though TS only picks this up if `"moduleResolution": "nodenext"` or `"moduleResolution": "bundler"`. diff --git a/src/_shims/auto/runtime-bun.ts b/src/_shims/auto/runtime-bun.ts new file mode 100644 index 0000000..e053254 --- /dev/null +++ b/src/_shims/auto/runtime-bun.ts @@ -0,0 +1,4 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +export * from '../bun-runtime'; diff --git a/src/_shims/auto/runtime-deno.ts b/src/_shims/auto/runtime-deno.ts new file mode 100644 index 0000000..62b7a39 --- /dev/null +++ b/src/_shims/auto/runtime-deno.ts @@ -0,0 +1,4 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +export * from '../web-runtime'; diff --git a/src/_shims/auto/runtime-node.ts b/src/_shims/auto/runtime-node.ts new file mode 100644 index 0000000..0ae2216 --- /dev/null +++ b/src/_shims/auto/runtime-node.ts @@ -0,0 +1,4 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. 
+ */ +export * from '../node-runtime'; diff --git a/src/_shims/auto/runtime.ts b/src/_shims/auto/runtime.ts new file mode 100644 index 0000000..62b7a39 --- /dev/null +++ b/src/_shims/auto/runtime.ts @@ -0,0 +1,4 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +export * from '../web-runtime'; diff --git a/src/_shims/auto/types-deno.ts b/src/_shims/auto/types-deno.ts new file mode 100644 index 0000000..226fb15 --- /dev/null +++ b/src/_shims/auto/types-deno.ts @@ -0,0 +1,4 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +export * from '../web-types'; diff --git a/src/_shims/auto/types-node.ts b/src/_shims/auto/types-node.ts new file mode 100644 index 0000000..2625a8b --- /dev/null +++ b/src/_shims/auto/types-node.ts @@ -0,0 +1,4 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +export * from '../node-types'; diff --git a/src/_shims/auto/types.d.ts b/src/_shims/auto/types.d.ts new file mode 100644 index 0000000..d775507 --- /dev/null +++ b/src/_shims/auto/types.d.ts @@ -0,0 +1,101 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +export type Agent = any; + +// @ts-ignore +declare const _fetch: unknown extends typeof fetch ? never : typeof fetch; +export { _fetch as fetch }; + +// @ts-ignore +type _Request = unknown extends Request ? never : Request; +export { _Request as Request }; + +// @ts-ignore +type _RequestInfo = unknown extends RequestInfo ? never : RequestInfo; +export { type _RequestInfo as RequestInfo }; + +// @ts-ignore +type _RequestInit = unknown extends RequestInit ? never : RequestInit; +export { type _RequestInit as RequestInit }; + +// @ts-ignore +type _Response = unknown extends Response ? never : Response; +export { _Response as Response }; + +// @ts-ignore +type _ResponseInit = unknown extends ResponseInit ? 
never : ResponseInit; +export { type _ResponseInit as ResponseInit }; + +// @ts-ignore +type _ResponseType = unknown extends ResponseType ? never : ResponseType; +export { type _ResponseType as ResponseType }; + +// @ts-ignore +type _BodyInit = unknown extends BodyInit ? never : BodyInit; +export { type _BodyInit as BodyInit }; + +// @ts-ignore +type _Headers = unknown extends Headers ? never : Headers; +export { _Headers as Headers }; + +// @ts-ignore +type _HeadersInit = unknown extends HeadersInit ? never : HeadersInit; +export { type _HeadersInit as HeadersInit }; + +type EndingType = 'native' | 'transparent'; + +export interface BlobPropertyBag { + endings?: EndingType; + type?: string; +} + +export interface FilePropertyBag extends BlobPropertyBag { + lastModified?: number; +} + +export type FileFromPathOptions = Omit; + +// @ts-ignore +type _FormData = unknown extends FormData ? never : FormData; +// @ts-ignore +declare const _FormData: unknown extends typeof FormData ? never : typeof FormData; +export { _FormData as FormData }; + +// @ts-ignore +type _File = unknown extends File ? never : File; +// @ts-ignore +declare const _File: unknown extends typeof File ? never : typeof File; +export { _File as File }; + +// @ts-ignore +type _Blob = unknown extends Blob ? never : Blob; +// @ts-ignore +declare const _Blob: unknown extends typeof Blob ? 
never : typeof Blob; +export { _Blob as Blob }; + +export declare class Readable { + readable: boolean; + readonly readableEnded: boolean; + readonly readableFlowing: boolean | null; + readonly readableHighWaterMark: number; + readonly readableLength: number; + readonly readableObjectMode: boolean; + destroyed: boolean; + read(size?: number): any; + pause(): this; + resume(): this; + isPaused(): boolean; + destroy(error?: Error): this; + [Symbol.asyncIterator](): AsyncIterableIterator; +} + +export declare class FsReadStream extends Readable { + path: {}; // node type is string | Buffer +} + +// @ts-ignore +type _ReadableStream = unknown extends ReadableStream ? never : ReadableStream; +// @ts-ignore +declare const _ReadableStream: unknown extends typeof ReadableStream ? never : typeof ReadableStream; +export { _ReadableStream as ReadableStream }; diff --git a/src/_shims/auto/types.js b/src/_shims/auto/types.js new file mode 100644 index 0000000..ddbdb79 --- /dev/null +++ b/src/_shims/auto/types.js @@ -0,0 +1,3 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ diff --git a/src/_shims/auto/types.mjs b/src/_shims/auto/types.mjs new file mode 100644 index 0000000..ddbdb79 --- /dev/null +++ b/src/_shims/auto/types.mjs @@ -0,0 +1,3 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ diff --git a/src/_shims/bun-runtime.ts b/src/_shims/bun-runtime.ts new file mode 100644 index 0000000..8d5aaab --- /dev/null +++ b/src/_shims/bun-runtime.ts @@ -0,0 +1,14 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. 
+ */ +import { type Shims } from './registry'; +import { getRuntime as getWebRuntime } from './web-runtime'; +import { ReadStream as FsReadStream } from 'node:fs'; + +export function getRuntime(): Shims { + const runtime = getWebRuntime(); + function isFsReadStream(value: any): value is FsReadStream { + return value instanceof FsReadStream; + } + return { ...runtime, isFsReadStream }; +} diff --git a/src/_shims/index-deno.ts b/src/_shims/index-deno.ts new file mode 100644 index 0000000..71182fb --- /dev/null +++ b/src/_shims/index-deno.ts @@ -0,0 +1,112 @@ +import { MultipartBody } from './MultipartBody'; +import { type RequestOptions } from '../core'; + +export const kind: string = 'web'; + +export type Agent = any; + +const _fetch = fetch; +type _fetch = typeof fetch; +export { _fetch as fetch }; + +const _Request = Request; +type _Request = Request; +export { _Request as Request }; + +type _RequestInfo = RequestInfo; +export { type _RequestInfo as RequestInfo }; + +type _RequestInit = RequestInit; +export { type _RequestInit as RequestInit }; + +const _Response = Response; +type _Response = Response; +export { _Response as Response }; + +type _ResponseInit = ResponseInit; +export { type _ResponseInit as ResponseInit }; + +type _ResponseType = ResponseType; +export { type _ResponseType as ResponseType }; + +type _BodyInit = BodyInit; +export { type _BodyInit as BodyInit }; + +const _Headers = Headers; +type _Headers = Headers; +export { _Headers as Headers }; + +type _HeadersInit = HeadersInit; +export { type _HeadersInit as HeadersInit }; + +type EndingType = 'native' | 'transparent'; + +export interface BlobPropertyBag { + endings?: EndingType; + type?: string; +} + +export interface FilePropertyBag extends BlobPropertyBag { + lastModified?: number; +} + +export type FileFromPathOptions = Omit; + +const _FormData = FormData; +type _FormData = FormData; +export { _FormData as FormData }; + +const _File = File; +type _File = File; +export { _File as File }; + 
+const _Blob = Blob; +type _Blob = Blob; +export { _Blob as Blob }; + +export async function getMultipartRequestOptions>( + form: FormData, + opts: RequestOptions, +): Promise> { + return { + ...opts, + body: new MultipartBody(form) as any, + }; +} + +export function getDefaultAgent(url: string) { + return undefined; +} +export function fileFromPath() { + throw new Error( + 'The `fileFromPath` function is only supported in Node. See the README for more details: https://www.github.com/llamastack/llama-stack-client-typescript#file-uploads', + ); +} + +export const isFsReadStream = (value: any) => false; + +export declare class Readable { + readable: boolean; + readonly readableEnded: boolean; + readonly readableFlowing: boolean | null; + readonly readableHighWaterMark: number; + readonly readableLength: number; + readonly readableObjectMode: boolean; + destroyed: boolean; + read(size?: number): any; + pause(): this; + resume(): this; + isPaused(): boolean; + destroy(error?: Error): this; + [Symbol.asyncIterator](): AsyncIterableIterator; +} + +export declare class FsReadStream extends Readable { + path: {}; // node type is string | Buffer +} + +const _ReadableStream = ReadableStream; +type _ReadableStream = ReadableStream; +export { _ReadableStream as ReadableStream }; + +export const init = () => {}; diff --git a/src/_shims/index.d.ts b/src/_shims/index.d.ts new file mode 100644 index 0000000..5fb2bf0 --- /dev/null +++ b/src/_shims/index.d.ts @@ -0,0 +1,83 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +import { manual } from './manual-types'; +import * as auto from 'llama-stack-client/_shims/auto/types'; +import { type RequestOptions } from '../core'; + +type SelectType = unknown extends Manual ? 
Auto : Manual; + +export const kind: string; + +// @ts-ignore +export type Agent = SelectType; + +// @ts-ignore +export const fetch: SelectType; + +// @ts-ignore +export type Request = SelectType; +// @ts-ignore +export type RequestInfo = SelectType; +// @ts-ignore +export type RequestInit = SelectType; + +// @ts-ignore +export type Response = SelectType; +// @ts-ignore +export type ResponseInit = SelectType; +// @ts-ignore +export type ResponseType = SelectType; +// @ts-ignore +export type BodyInit = SelectType; +// @ts-ignore +export type Headers = SelectType; +// @ts-ignore +export const Headers: SelectType; +// @ts-ignore +export type HeadersInit = SelectType; + +// @ts-ignore +export type BlobPropertyBag = SelectType; +// @ts-ignore +export type FilePropertyBag = SelectType; +// @ts-ignore +export type FileFromPathOptions = SelectType; +// @ts-ignore +export type FormData = SelectType; +// @ts-ignore +export const FormData: SelectType; +// @ts-ignore +export type File = SelectType; +// @ts-ignore +export const File: SelectType; +// @ts-ignore +export type Blob = SelectType; +// @ts-ignore +export const Blob: SelectType; + +// @ts-ignore +export type Readable = SelectType; +// @ts-ignore +export type FsReadStream = SelectType; +// @ts-ignore +export type ReadableStream = SelectType; +// @ts-ignore +export const ReadableStream: SelectType; + +export function getMultipartRequestOptions>( + form: FormData, + opts: RequestOptions, +): Promise>; + +export function getDefaultAgent(url: string): any; + +// @ts-ignore +export type FileFromPathOptions = SelectType; + +export function fileFromPath(path: string, options?: FileFromPathOptions): Promise; +export function fileFromPath(path: string, filename?: string, options?: FileFromPathOptions): Promise; + +export function isFsReadStream(value: any): value is FsReadStream; + +export const init: () => void; diff --git a/src/_shims/index.js b/src/_shims/index.js new file mode 100644 index 0000000..b69d102 --- /dev/null +++ 
b/src/_shims/index.js @@ -0,0 +1,17 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +const shims = require('./registry'); +const auto = require('llama-stack-client/_shims/auto/runtime'); +exports.init = () => { + if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +}; +for (const property of Object.keys(shims)) { + Object.defineProperty(exports, property, { + get() { + return shims[property]; + }, + }); +} + +exports.init(); diff --git a/src/_shims/index.mjs b/src/_shims/index.mjs new file mode 100644 index 0000000..4b95f54 --- /dev/null +++ b/src/_shims/index.mjs @@ -0,0 +1,11 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +import * as shims from './registry.mjs'; +import * as auto from 'llama-stack-client/_shims/auto/runtime'; +export const init = () => { + if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +}; +export * from './registry.mjs'; + +init(); diff --git a/src/_shims/manual-types.d.ts b/src/_shims/manual-types.d.ts new file mode 100644 index 0000000..5a942fc --- /dev/null +++ b/src/_shims/manual-types.d.ts @@ -0,0 +1,12 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +/** + * Types will get added to this namespace when you import one of the following: + * + * import 'llama-stack-client/shims/node' + * import 'llama-stack-client/shims/web' + * + * Importing more than one will cause type and runtime errors. + */ +export namespace manual {} diff --git a/src/_shims/manual-types.js b/src/_shims/manual-types.js new file mode 100644 index 0000000..ddbdb79 --- /dev/null +++ b/src/_shims/manual-types.js @@ -0,0 +1,3 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. 
+ */ diff --git a/src/_shims/manual-types.mjs b/src/_shims/manual-types.mjs new file mode 100644 index 0000000..ddbdb79 --- /dev/null +++ b/src/_shims/manual-types.mjs @@ -0,0 +1,3 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ diff --git a/src/_shims/node-runtime.ts b/src/_shims/node-runtime.ts new file mode 100644 index 0000000..ab9f2ab --- /dev/null +++ b/src/_shims/node-runtime.ts @@ -0,0 +1,81 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +import * as nf from 'node-fetch'; +import * as fd from 'formdata-node'; +import { type File, type FilePropertyBag } from 'formdata-node'; +import KeepAliveAgent from 'agentkeepalive'; +import { AbortController as AbortControllerPolyfill } from 'abort-controller'; +import { ReadStream as FsReadStream } from 'node:fs'; +import { type Agent } from 'node:http'; +import { FormDataEncoder } from 'form-data-encoder'; +import { Readable } from 'node:stream'; +import { type RequestOptions } from '../core'; +import { MultipartBody } from './MultipartBody'; +import { type Shims } from './registry'; +import { ReadableStream } from 'node:stream/web'; + +type FileFromPathOptions = Omit; + +let fileFromPathWarned = false; + +/** + * @deprecated use fs.createReadStream('./my/file.txt') instead + */ +async function fileFromPath(path: string): Promise; +async function fileFromPath(path: string, filename?: string): Promise; +async function fileFromPath(path: string, options?: FileFromPathOptions): Promise; +async function fileFromPath(path: string, filename?: string, options?: FileFromPathOptions): Promise; +async function fileFromPath(path: string, ...args: any[]): Promise { + // this import fails in environments that don't handle export maps correctly, like old versions of Jest + const { fileFromPath: _fileFromPath } = await import('formdata-node/file-from-path'); + + if (!fileFromPathWarned) { + console.warn(`fileFromPath is deprecated; use 
fs.createReadStream(${JSON.stringify(path)}) instead`); + fileFromPathWarned = true; + } + // @ts-ignore + return await _fileFromPath(path, ...args); +} + +const defaultHttpAgent: Agent = new KeepAliveAgent({ keepAlive: true, timeout: 5 * 60 * 1000 }); +const defaultHttpsAgent: Agent = new KeepAliveAgent.HttpsAgent({ keepAlive: true, timeout: 5 * 60 * 1000 }); + +async function getMultipartRequestOptions>( + form: fd.FormData, + opts: RequestOptions, +): Promise> { + const encoder = new FormDataEncoder(form); + const readable = Readable.from(encoder); + const body = new MultipartBody(readable); + const headers = { + ...opts.headers, + ...encoder.headers, + 'Content-Length': encoder.contentLength, + }; + + return { ...opts, body: body as any, headers }; +} + +export function getRuntime(): Shims { + // Polyfill global object if needed. + if (typeof AbortController === 'undefined') { + // @ts-expect-error (the types are subtly different, but compatible in practice) + globalThis.AbortController = AbortControllerPolyfill; + } + return { + kind: 'node', + fetch: nf.default, + Request: nf.Request, + Response: nf.Response, + Headers: nf.Headers, + FormData: fd.FormData, + Blob: fd.Blob, + File: fd.File, + ReadableStream, + getMultipartRequestOptions, + getDefaultAgent: (url: string): Agent => (url.startsWith('https') ? defaultHttpsAgent : defaultHttpAgent), + fileFromPath, + isFsReadStream: (value: any): value is FsReadStream => value instanceof FsReadStream, + }; +} diff --git a/src/_shims/node-types.d.ts b/src/_shims/node-types.d.ts new file mode 100644 index 0000000..c159e5f --- /dev/null +++ b/src/_shims/node-types.d.ts @@ -0,0 +1,42 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. 
+ */ +import * as nf from 'node-fetch'; +import * as fd from 'formdata-node'; + +export { type Agent } from 'node:http'; +export { type Readable } from 'node:stream'; +export { type ReadStream as FsReadStream } from 'node:fs'; +export { ReadableStream } from 'node:stream/web'; + +export const fetch: typeof nf.default; + +export type Request = nf.Request; +export type RequestInfo = nf.RequestInfo; +export type RequestInit = nf.RequestInit; + +export type Response = nf.Response; +export type ResponseInit = nf.ResponseInit; +export type ResponseType = nf.ResponseType; +export type BodyInit = nf.BodyInit; +export type Headers = nf.Headers; +export type HeadersInit = nf.HeadersInit; + +type EndingType = 'native' | 'transparent'; +export interface BlobPropertyBag { + endings?: EndingType; + type?: string; +} + +export interface FilePropertyBag extends BlobPropertyBag { + lastModified?: number; +} + +export type FileFromPathOptions = Omit; + +export type FormData = fd.FormData; +export const FormData: typeof fd.FormData; +export type File = fd.File; +export const File: typeof fd.File; +export type Blob = fd.Blob; +export const Blob: typeof fd.Blob; diff --git a/src/_shims/node-types.js b/src/_shims/node-types.js new file mode 100644 index 0000000..ddbdb79 --- /dev/null +++ b/src/_shims/node-types.js @@ -0,0 +1,3 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ diff --git a/src/_shims/node-types.mjs b/src/_shims/node-types.mjs new file mode 100644 index 0000000..ddbdb79 --- /dev/null +++ b/src/_shims/node-types.mjs @@ -0,0 +1,3 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ diff --git a/src/_shims/registry.ts b/src/_shims/registry.ts new file mode 100644 index 0000000..9a54e61 --- /dev/null +++ b/src/_shims/registry.ts @@ -0,0 +1,67 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. 
+ */ +import { type RequestOptions } from '../core'; + +export interface Shims { + kind: string; + fetch: any; + Request: any; + Response: any; + Headers: any; + FormData: any; + Blob: any; + File: any; + ReadableStream: any; + getMultipartRequestOptions: >( + form: Shims['FormData'], + opts: RequestOptions, + ) => Promise>; + getDefaultAgent: (url: string) => any; + fileFromPath: + | ((path: string, filename?: string, options?: {}) => Promise) + | ((path: string, options?: {}) => Promise); + isFsReadStream: (value: any) => boolean; +} + +export let auto = false; +export let kind: Shims['kind'] | undefined = undefined; +export let fetch: Shims['fetch'] | undefined = undefined; +export let Request: Shims['Request'] | undefined = undefined; +export let Response: Shims['Response'] | undefined = undefined; +export let Headers: Shims['Headers'] | undefined = undefined; +export let FormData: Shims['FormData'] | undefined = undefined; +export let Blob: Shims['Blob'] | undefined = undefined; +export let File: Shims['File'] | undefined = undefined; +export let ReadableStream: Shims['ReadableStream'] | undefined = undefined; +export let getMultipartRequestOptions: Shims['getMultipartRequestOptions'] | undefined = undefined; +export let getDefaultAgent: Shims['getDefaultAgent'] | undefined = undefined; +export let fileFromPath: Shims['fileFromPath'] | undefined = undefined; +export let isFsReadStream: Shims['isFsReadStream'] | undefined = undefined; + +export function setShims(shims: Shims, options: { auto: boolean } = { auto: false }) { + if (auto) { + throw new Error( + `you must \`import 'llama-stack-client/shims/${shims.kind}'\` before importing anything else from llama-stack-client`, + ); + } + if (kind) { + throw new Error( + `can't \`import 'llama-stack-client/shims/${shims.kind}'\` after \`import 'llama-stack-client/shims/${kind}'\``, + ); + } + auto = options.auto; + kind = shims.kind; + fetch = shims.fetch; + Request = shims.Request; + Response = shims.Response; + 
Headers = shims.Headers; + FormData = shims.FormData; + Blob = shims.Blob; + File = shims.File; + ReadableStream = shims.ReadableStream; + getMultipartRequestOptions = shims.getMultipartRequestOptions; + getDefaultAgent = shims.getDefaultAgent; + fileFromPath = shims.fileFromPath; + isFsReadStream = shims.isFsReadStream; +} diff --git a/src/_shims/web-runtime.ts b/src/_shims/web-runtime.ts new file mode 100644 index 0000000..8237f0e --- /dev/null +++ b/src/_shims/web-runtime.ts @@ -0,0 +1,103 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ +import { MultipartBody } from './MultipartBody'; +import { type RequestOptions } from '../core'; +import { type Shims } from './registry'; + +export function getRuntime({ manuallyImported }: { manuallyImported?: boolean } = {}): Shims { + const recommendation = + manuallyImported ? + `You may need to use polyfills` + : `Add one of these imports before your first \`import … from 'llama-stack-client'\`: +- \`import 'llama-stack-client/shims/node'\` (if you're running on Node) +- \`import 'llama-stack-client/shims/web'\` (otherwise) +`; + + let _fetch, _Request, _Response, _Headers; + try { + // @ts-ignore + _fetch = fetch; + // @ts-ignore + _Request = Request; + // @ts-ignore + _Response = Response; + // @ts-ignore + _Headers = Headers; + } catch (error) { + throw new Error( + `this environment is missing the following Web Fetch API type: ${ + (error as any).message + }. ${recommendation}`, + ); + } + + return { + kind: 'web', + fetch: _fetch, + Request: _Request, + Response: _Response, + Headers: _Headers, + FormData: + // @ts-ignore + typeof FormData !== 'undefined' ? FormData : ( + class FormData { + // @ts-ignore + constructor() { + throw new Error( + `file uploads aren't supported in this environment yet as 'FormData' is undefined. ${recommendation}`, + ); + } + } + ), + Blob: + typeof Blob !== 'undefined' ? 
Blob : ( + class Blob { + constructor() { + throw new Error( + `file uploads aren't supported in this environment yet as 'Blob' is undefined. ${recommendation}`, + ); + } + } + ), + File: + // @ts-ignore + typeof File !== 'undefined' ? File : ( + class File { + // @ts-ignore + constructor() { + throw new Error( + `file uploads aren't supported in this environment yet as 'File' is undefined. ${recommendation}`, + ); + } + } + ), + ReadableStream: + // @ts-ignore + typeof ReadableStream !== 'undefined' ? ReadableStream : ( + class ReadableStream { + // @ts-ignore + constructor() { + throw new Error( + `streaming isn't supported in this environment yet as 'ReadableStream' is undefined. ${recommendation}`, + ); + } + } + ), + getMultipartRequestOptions: async >( + // @ts-ignore + form: FormData, + opts: RequestOptions, + ): Promise> => ({ + ...opts, + body: new MultipartBody(form) as any, + }), + getDefaultAgent: (url: string) => undefined, + fileFromPath: () => { + throw new Error( + 'The `fileFromPath` function is only supported in Node. See the README for more details: https://www.github.com/llamastack/llama-stack-client-typescript#file-uploads', + ); + }, + isFsReadStream: (value: any) => false, + }; +} diff --git a/src/_shims/web-types.d.ts b/src/_shims/web-types.d.ts new file mode 100644 index 0000000..4ff3513 --- /dev/null +++ b/src/_shims/web-types.d.ts @@ -0,0 +1,83 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. 
+ */ +export type Agent = any; + +declare const _fetch: typeof fetch; +export { _fetch as fetch }; + +type _Request = Request; +export { _Request as Request }; + +type _RequestInfo = RequestInfo; +export { type _RequestInfo as RequestInfo }; + +type _RequestInit = RequestInit; +export { type _RequestInit as RequestInit }; + +type _Response = Response; +export { _Response as Response }; + +type _ResponseInit = ResponseInit; +export { type _ResponseInit as ResponseInit }; + +type _ResponseType = ResponseType; +export { type _ResponseType as ResponseType }; + +type _BodyInit = BodyInit; +export { type _BodyInit as BodyInit }; + +type _Headers = Headers; +export { _Headers as Headers }; + +type _HeadersInit = HeadersInit; +export { type _HeadersInit as HeadersInit }; + +type EndingType = 'native' | 'transparent'; + +export interface BlobPropertyBag { + endings?: EndingType; + type?: string; +} + +export interface FilePropertyBag extends BlobPropertyBag { + lastModified?: number; +} + +export type FileFromPathOptions = Omit; + +type _FormData = FormData; +declare const _FormData: typeof FormData; +export { _FormData as FormData }; + +type _File = File; +declare const _File: typeof File; +export { _File as File }; + +type _Blob = Blob; +declare const _Blob: typeof Blob; +export { _Blob as Blob }; + +export declare class Readable { + readable: boolean; + readonly readableEnded: boolean; + readonly readableFlowing: boolean | null; + readonly readableHighWaterMark: number; + readonly readableLength: number; + readonly readableObjectMode: boolean; + destroyed: boolean; + read(size?: number): any; + pause(): this; + resume(): this; + isPaused(): boolean; + destroy(error?: Error): this; + [Symbol.asyncIterator](): AsyncIterableIterator; +} + +export declare class FsReadStream extends Readable { + path: {}; // node type is string | Buffer +} + +type _ReadableStream = ReadableStream; +declare const _ReadableStream: typeof ReadableStream; +export { _ReadableStream as 
ReadableStream }; diff --git a/src/_shims/web-types.js b/src/_shims/web-types.js new file mode 100644 index 0000000..ddbdb79 --- /dev/null +++ b/src/_shims/web-types.js @@ -0,0 +1,3 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ diff --git a/src/_shims/web-types.mjs b/src/_shims/web-types.mjs new file mode 100644 index 0000000..ddbdb79 --- /dev/null +++ b/src/_shims/web-types.mjs @@ -0,0 +1,3 @@ +/** + * Disclaimer: modules in _shims aren't intended to be imported by SDK users. + */ diff --git a/src/api-promise.ts b/src/api-promise.ts deleted file mode 100644 index 8c775ee..0000000 --- a/src/api-promise.ts +++ /dev/null @@ -1,2 +0,0 @@ -/** @deprecated Import from ./core/api-promise instead */ -export * from './core/api-promise'; diff --git a/src/client.ts b/src/client.ts deleted file mode 100644 index e1601cb..0000000 --- a/src/client.ts +++ /dev/null @@ -1,1060 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import type { RequestInit, RequestInfo, BodyInit } from './internal/builtin-types'; -import type { HTTPMethod, PromiseOrValue, MergedRequestInit, FinalizedRequestInit } from './internal/types'; -import { uuid4 } from './internal/utils/uuid'; -import { validatePositiveInteger, isAbsoluteURL, safeJSON } from './internal/utils/values'; -import { sleep } from './internal/utils/sleep'; -export type { Logger, LogLevel } from './internal/utils/log'; -import { castToError, isAbortError } from './internal/errors'; -import type { APIResponseProps } from './internal/parse'; -import { getPlatformHeaders } from './internal/detect-platform'; -import * as Shims from './internal/shims'; -import * as Opts from './internal/request-options'; -import * as qs from './internal/qs'; -import { VERSION } from './version'; -import * as Errors from './core/error'; -import * as Uploads from './core/uploads'; -import * as API from './resources/index'; -import { APIPromise } from './core/api-promise'; -import { - Datasetio, - DatasetioAppendRowsParams, - DatasetioIterateRowsParams, - DatasetioIterateRowsResponse, -} from './resources/datasetio'; -import { - DataSource, - Dataset, - DatasetCreateParams, - DatasetListResponse, - Datasets, -} from './resources/datasets'; -import { Health, HealthCheckResponse } from './resources/health'; -import { - ChatCompletionResponse, - CompletionMessage, - CompletionResponse, - Inference, - InferenceBatchChatCompletionParams, - InferenceBatchChatCompletionResponse, - InferenceBatchCompletionParams, - InferenceBatchCompletionResponse, - InferenceChatCompletionParams, - InferenceCompletionParams, - InferenceEmbeddingsParams, - InferenceEmbeddingsResponse, - InterleavedContent, - InterleavedContentItem, - Message, - MetricInResponse, - ResponseFormat, - SamplingParams, - SystemMessage, - TokenLogProbs, - ToolCall, - ToolConfig, - ToolDefinition, -} from './resources/inference'; -import { Inspect, InspectListRoutesResponse } from './resources/inspect'; -import 
{ Model, ModelCreateParams, ModelListResponse, ModelType, Models } from './resources/models'; -import { ProviderInfo, ProviderListResponse, Providers } from './resources/providers'; -import { Safety, SafetyRunShieldParams, SafetyRunShieldResponse, SafetyViolation } from './resources/safety'; -import { - Scoring, - ScoringScoreBatchParams, - ScoringScoreBatchResponse, - ScoringScoreParams, - ScoringScoreResponse, -} from './resources/scoring'; -import { - AggregationFunctionType, - ParamType, - ScoringFn, - ScoringFnParams, - ScoringFnParamsType, - ScoringFunctionCreateParams, - ScoringFunctionListResponse, - ScoringFunctions, -} from './resources/scoring-functions'; -import { Shield, ShieldCreateParams, ShieldListResponse, Shields } from './resources/shields'; -import { - SyntheticDataGeneration, - SyntheticDataGenerationGenerateParams, - SyntheticDataGenerationGenerateResponse, -} from './resources/synthetic-data-generation'; -import { - ToolGroup, - ToolgroupListResponse, - ToolgroupRegisterParams, - Toolgroups, -} from './resources/toolgroups'; -import { Tool, ToolListParams, ToolListResponse, ToolParameter, Tools } from './resources/tools'; -import { VectorDB, VectorDBCreateParams, VectorDBListResponse, VectorDBs } from './resources/vector-dbs'; -import { - VectorIo, - VectorIoInsertParams, - VectorIoQueryParams, - VectorIoQueryResponse, -} from './resources/vector-io'; -import { Version, VersionRetrieveResponse } from './resources/version'; -import { - Agent, - AgentConfig, - AgentCreateParams, - AgentCreateResponse, - AgentListResponse, - AgentListSessionsResponse, - Agents, -} from './resources/agents/agents'; -import { Eval } from './resources/eval/eval'; -import { - File, - FileCreateUploadSessionParams, - FileDeleteParams, - FileListInBucketResponse, - FileListParams, - FileListResponse, - FileRetrieveParams, - FileUpload, - Files, -} from './resources/files/files'; -import { OpenAI } from './resources/openai/openai'; -import { - PostTraining, - 
PostTrainingFineTuneSupervisedParams, - PostTrainingJob, - PostTrainingListJobsResponse, - PostTrainingOptimizePreferencesParams, - TrainingConfig, -} from './resources/post-training/post-training'; -import { - EventType, - StructuredLogType, - Telemetry, - TelemetryCreateEventParams, -} from './resources/telemetry/telemetry'; -import { - ToolDef, - ToolRuntime, - ToolRuntimeInvokeToolParams, - ToolRuntimeInvokeToolResponse, - ToolRuntimeListToolsParams, - ToolRuntimeListToolsResponse, - URL, -} from './resources/tool-runtime/tool-runtime'; -import { type Fetch } from './internal/builtin-types'; -import { HeadersLike, NullableHeaders, buildHeaders } from './internal/headers'; -import { FinalRequestOptions, RequestOptions } from './internal/request-options'; -import { readEnv } from './internal/utils/env'; -import { - type LogLevel, - type Logger, - formatRequestDetails, - loggerFor, - parseLogLevel, -} from './internal/utils/log'; -import { isEmptyObj } from './internal/utils/values'; - -export interface ClientOptions { - /** - * Defaults to process.env['LLAMA_STACK_CLIENT_API_KEY']. - */ - apiKey?: string | null | undefined; - - /** - * Override the default base URL for the API, e.g., "https://api.example.com/v2/" - * - * Defaults to process.env['LLAMA_STACK_CLIENT_BASE_URL']. - */ - baseURL?: string | null | undefined; - - /** - * The maximum amount of time (in milliseconds) that the client should wait for a response - * from the server before timing out a single request. - * - * Note that request timeouts are retried by default, so in a worst-case scenario you may wait - * much longer than this timeout before the promise succeeds or fails. - */ - timeout?: number | undefined; - /** - * Additional `RequestInit` options to be passed to `fetch` calls. - * Properties will be overridden by per-request `fetchOptions`. - */ - fetchOptions?: MergedRequestInit | undefined; - - /** - * Specify a custom `fetch` function implementation. 
- * - * If not provided, we expect that `fetch` is defined globally. - */ - fetch?: Fetch | undefined; - - /** - * The maximum number of times that the client will retry a request in case of a - * temporary failure, like a network error or a 5XX error from the server. - * - * @default 2 - */ - maxRetries?: number | undefined; - - /** - * Default headers to include with every request to the API. - * - * These can be removed in individual requests by explicitly setting the - * header to `null` in request options. - */ - defaultHeaders?: HeadersLike | undefined; - - /** - * Default query parameters to include with every request to the API. - * - * These can be removed in individual requests by explicitly setting the - * param to `undefined` in request options. - */ - defaultQuery?: Record | undefined; - - /** - * Set the log level. - * - * Defaults to process.env['LLAMA_STACK_CLIENT_LOG'] or 'warn' if it isn't set. - */ - logLevel?: LogLevel | undefined; - - /** - * Set the logger. - * - * Defaults to globalThis.console. - */ - logger?: Logger | undefined; -} - -/** - * API Client for interfacing with the Llama Stack Client API. - */ -export class LlamaStackClient { - apiKey: string | null; - - baseURL: string; - maxRetries: number; - timeout: number; - logger: Logger | undefined; - logLevel: LogLevel | undefined; - fetchOptions: MergedRequestInit | undefined; - - private fetch: Fetch; - #encoder: Opts.RequestEncoder; - protected idempotencyHeader?: string; - private _options: ClientOptions; - - /** - * API Client for interfacing with the Llama Stack Client API. - * - * @param {string | null | undefined} [opts.apiKey=process.env['LLAMA_STACK_CLIENT_API_KEY'] ?? null] - * @param {string} [opts.baseURL=process.env['LLAMA_STACK_CLIENT_BASE_URL'] ?? http://any-hosted-llama-stack.com] - Override the default base URL for the API. - * @param {number} [opts.timeout=1 minute] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out. 
- * @param {MergedRequestInit} [opts.fetchOptions] - Additional `RequestInit` options to be passed to `fetch` calls. - * @param {Fetch} [opts.fetch] - Specify a custom `fetch` function implementation. - * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request. - * @param {HeadersLike} opts.defaultHeaders - Default headers to include with every request to the API. - * @param {Record} opts.defaultQuery - Default query parameters to include with every request to the API. - */ - constructor({ - baseURL = readEnv('LLAMA_STACK_CLIENT_BASE_URL'), - apiKey = readEnv('LLAMA_STACK_CLIENT_API_KEY') ?? null, - ...opts - }: ClientOptions = {}) { - const options: ClientOptions = { - apiKey, - ...opts, - baseURL: baseURL || `http://any-hosted-llama-stack.com`, - }; - - this.baseURL = options.baseURL!; - this.timeout = options.timeout ?? LlamaStackClient.DEFAULT_TIMEOUT /* 1 minute */; - this.logger = options.logger ?? console; - const defaultLogLevel = 'warn'; - // Set default logLevel early so that we can log a warning in parseLogLevel. - this.logLevel = defaultLogLevel; - this.logLevel = - parseLogLevel(options.logLevel, 'ClientOptions.logLevel', this) ?? - parseLogLevel(readEnv('LLAMA_STACK_CLIENT_LOG'), "process.env['LLAMA_STACK_CLIENT_LOG']", this) ?? - defaultLogLevel; - this.fetchOptions = options.fetchOptions; - this.maxRetries = options.maxRetries ?? 2; - this.fetch = options.fetch ?? Shims.getDefaultFetch(); - this.#encoder = Opts.FallbackEncoder; - - this._options = options; - - this.apiKey = apiKey; - } - - /** - * Create a new client instance re-using the same options given to the current client with optional overriding. 
- */ - withOptions(options: Partial): this { - return new (this.constructor as any as new (props: ClientOptions) => typeof this)({ - ...this._options, - baseURL: this.baseURL, - maxRetries: this.maxRetries, - timeout: this.timeout, - logger: this.logger, - logLevel: this.logLevel, - fetch: this.fetch, - fetchOptions: this.fetchOptions, - apiKey: this.apiKey, - ...options, - }); - } - - /** - * Check whether the base URL is set to its default. - */ - #baseURLOverridden(): boolean { - return this.baseURL !== 'http://any-hosted-llama-stack.com'; - } - - protected defaultQuery(): Record | undefined { - return this._options.defaultQuery; - } - - protected validateHeaders({ values, nulls }: NullableHeaders) { - if (this.apiKey && values.get('authorization')) { - return; - } - if (nulls.has('authorization')) { - return; - } - - throw new Error( - 'Could not resolve authentication method. Expected the apiKey to be set. Or for the "Authorization" headers to be explicitly omitted', - ); - } - - protected authHeaders(opts: FinalRequestOptions): NullableHeaders | undefined { - if (this.apiKey == null) { - return undefined; - } - return buildHeaders([{ Authorization: `Bearer ${this.apiKey}` }]); - } - - protected stringifyQuery(query: Record): string { - return qs.stringify(query, { arrayFormat: 'comma' }); - } - - private getUserAgent(): string { - return `${this.constructor.name}/JS ${VERSION}`; - } - - protected defaultIdempotencyKey(): string { - return `stainless-node-retry-${uuid4()}`; - } - - protected makeStatusError( - status: number, - error: Object, - message: string | undefined, - headers: Headers, - ): Errors.APIError { - return Errors.APIError.generate(status, error, message, headers); - } - - buildURL( - path: string, - query: Record | null | undefined, - defaultBaseURL?: string | undefined, - ): string { - const baseURL = (!this.#baseURLOverridden() && defaultBaseURL) || this.baseURL; - const url = - isAbsoluteURL(path) ? 
- new URL(path) - : new URL(baseURL + (baseURL.endsWith('/') && path.startsWith('/') ? path.slice(1) : path)); - - const defaultQuery = this.defaultQuery(); - if (!isEmptyObj(defaultQuery)) { - query = { ...defaultQuery, ...query }; - } - - if (typeof query === 'object' && query && !Array.isArray(query)) { - url.search = this.stringifyQuery(query as Record); - } - - return url.toString(); - } - - /** - * Used as a callback for mutating the given `FinalRequestOptions` object. - */ - protected async prepareOptions(options: FinalRequestOptions): Promise {} - - /** - * Used as a callback for mutating the given `RequestInit` object. - * - * This is useful for cases where you want to add certain headers based off of - * the request properties, e.g. `method` or `url`. - */ - protected async prepareRequest( - request: RequestInit, - { url, options }: { url: string; options: FinalRequestOptions }, - ): Promise {} - - get(path: string, opts?: PromiseOrValue): APIPromise { - return this.methodRequest('get', path, opts); - } - - post(path: string, opts?: PromiseOrValue): APIPromise { - return this.methodRequest('post', path, opts); - } - - patch(path: string, opts?: PromiseOrValue): APIPromise { - return this.methodRequest('patch', path, opts); - } - - put(path: string, opts?: PromiseOrValue): APIPromise { - return this.methodRequest('put', path, opts); - } - - delete(path: string, opts?: PromiseOrValue): APIPromise { - return this.methodRequest('delete', path, opts); - } - - private methodRequest( - method: HTTPMethod, - path: string, - opts?: PromiseOrValue, - ): APIPromise { - return this.request( - Promise.resolve(opts).then((opts) => { - return { method, path, ...opts }; - }), - ); - } - - request( - options: PromiseOrValue, - remainingRetries: number | null = null, - ): APIPromise { - return new APIPromise(this, this.makeRequest(options, remainingRetries, undefined)); - } - - private async makeRequest( - optionsInput: PromiseOrValue, - retriesRemaining: number | null, - 
retryOfRequestLogID: string | undefined, - ): Promise { - const options = await optionsInput; - const maxRetries = options.maxRetries ?? this.maxRetries; - if (retriesRemaining == null) { - retriesRemaining = maxRetries; - } - - await this.prepareOptions(options); - - const { req, url, timeout } = this.buildRequest(options, { retryCount: maxRetries - retriesRemaining }); - - await this.prepareRequest(req, { url, options }); - - /** Not an API request ID, just for correlating local log entries. */ - const requestLogID = 'log_' + ((Math.random() * (1 << 24)) | 0).toString(16).padStart(6, '0'); - const retryLogStr = retryOfRequestLogID === undefined ? '' : `, retryOf: ${retryOfRequestLogID}`; - const startTime = Date.now(); - - loggerFor(this).debug( - `[${requestLogID}] sending request`, - formatRequestDetails({ - retryOfRequestLogID, - method: options.method, - url, - options, - headers: req.headers, - }), - ); - - if (options.signal?.aborted) { - throw new Errors.APIUserAbortError(); - } - - const controller = new AbortController(); - const response = await this.fetchWithTimeout(url, req, timeout, controller).catch(castToError); - const headersTime = Date.now(); - - if (response instanceof Error) { - const retryMessage = `retrying, ${retriesRemaining} attempts remaining`; - if (options.signal?.aborted) { - throw new Errors.APIUserAbortError(); - } - // detect native connection timeout errors - // deno throws "TypeError: error sending request for url (https://example/): client error (Connect): tcp connect error: Operation timed out (os error 60): Operation timed out (os error 60)" - // undici throws "TypeError: fetch failed" with cause "ConnectTimeoutError: Connect Timeout Error (attempted address: example:443, timeout: 1ms)" - // others do not provide enough information to distinguish timeouts from other connection errors - const isTimeout = - isAbortError(response) || - /timed? ?out/i.test(String(response) + ('cause' in response ? 
String(response.cause) : '')); - if (retriesRemaining) { - loggerFor(this).info( - `[${requestLogID}] connection ${isTimeout ? 'timed out' : 'failed'} - ${retryMessage}`, - ); - loggerFor(this).debug( - `[${requestLogID}] connection ${isTimeout ? 'timed out' : 'failed'} (${retryMessage})`, - formatRequestDetails({ - retryOfRequestLogID, - url, - durationMs: headersTime - startTime, - message: response.message, - }), - ); - return this.retryRequest(options, retriesRemaining, retryOfRequestLogID ?? requestLogID); - } - loggerFor(this).info( - `[${requestLogID}] connection ${isTimeout ? 'timed out' : 'failed'} - error; no more retries left`, - ); - loggerFor(this).debug( - `[${requestLogID}] connection ${isTimeout ? 'timed out' : 'failed'} (error; no more retries left)`, - formatRequestDetails({ - retryOfRequestLogID, - url, - durationMs: headersTime - startTime, - message: response.message, - }), - ); - if (isTimeout) { - throw new Errors.APIConnectionTimeoutError(); - } - throw new Errors.APIConnectionError({ cause: response }); - } - - const responseInfo = `[${requestLogID}${retryLogStr}] ${req.method} ${url} ${ - response.ok ? 'succeeded' : 'failed' - } with status ${response.status} in ${headersTime - startTime}ms`; - - if (!response.ok) { - const shouldRetry = this.shouldRetry(response); - if (retriesRemaining && shouldRetry) { - const retryMessage = `retrying, ${retriesRemaining} attempts remaining`; - - // We don't need the body of this response. - await Shims.CancelReadableStream(response.body); - loggerFor(this).info(`${responseInfo} - ${retryMessage}`); - loggerFor(this).debug( - `[${requestLogID}] response error (${retryMessage})`, - formatRequestDetails({ - retryOfRequestLogID, - url: response.url, - status: response.status, - headers: response.headers, - durationMs: headersTime - startTime, - }), - ); - return this.retryRequest( - options, - retriesRemaining, - retryOfRequestLogID ?? 
requestLogID, - response.headers, - ); - } - - const retryMessage = shouldRetry ? `error; no more retries left` : `error; not retryable`; - - loggerFor(this).info(`${responseInfo} - ${retryMessage}`); - - const errText = await response.text().catch((err: any) => castToError(err).message); - const errJSON = safeJSON(errText); - const errMessage = errJSON ? undefined : errText; - - loggerFor(this).debug( - `[${requestLogID}] response error (${retryMessage})`, - formatRequestDetails({ - retryOfRequestLogID, - url: response.url, - status: response.status, - headers: response.headers, - message: errMessage, - durationMs: Date.now() - startTime, - }), - ); - - const err = this.makeStatusError(response.status, errJSON, errMessage, response.headers); - throw err; - } - - loggerFor(this).info(responseInfo); - loggerFor(this).debug( - `[${requestLogID}] response start`, - formatRequestDetails({ - retryOfRequestLogID, - url: response.url, - status: response.status, - headers: response.headers, - durationMs: headersTime - startTime, - }), - ); - - return { response, options, controller, requestLogID, retryOfRequestLogID, startTime }; - } - - async fetchWithTimeout( - url: RequestInfo, - init: RequestInit | undefined, - ms: number, - controller: AbortController, - ): Promise { - const { signal, method, ...options } = init || {}; - if (signal) signal.addEventListener('abort', () => controller.abort()); - - const timeout = setTimeout(() => controller.abort(), ms); - - const isReadableBody = - ((globalThis as any).ReadableStream && options.body instanceof (globalThis as any).ReadableStream) || - (typeof options.body === 'object' && options.body !== null && Symbol.asyncIterator in options.body); - - const fetchOptions: RequestInit = { - signal: controller.signal as any, - ...(isReadableBody ? 
{ duplex: 'half' } : {}), - method: 'GET', - ...options, - }; - if (method) { - // Custom methods like 'patch' need to be uppercased - // See https://github.com/nodejs/undici/issues/2294 - fetchOptions.method = method.toUpperCase(); - } - - try { - // use undefined this binding; fetch errors if bound to something else in browser/cloudflare - return await this.fetch.call(undefined, url, fetchOptions); - } finally { - clearTimeout(timeout); - } - } - - private shouldRetry(response: Response): boolean { - // Note this is not a standard header. - const shouldRetryHeader = response.headers.get('x-should-retry'); - - // If the server explicitly says whether or not to retry, obey. - if (shouldRetryHeader === 'true') return true; - if (shouldRetryHeader === 'false') return false; - - // Retry on request timeouts. - if (response.status === 408) return true; - - // Retry on lock timeouts. - if (response.status === 409) return true; - - // Retry on rate limits. - if (response.status === 429) return true; - - // Retry internal errors. - if (response.status >= 500) return true; - - return false; - } - - private async retryRequest( - options: FinalRequestOptions, - retriesRemaining: number, - requestLogID: string, - responseHeaders?: Headers | undefined, - ): Promise { - let timeoutMillis: number | undefined; - - // Note the `retry-after-ms` header may not be standard, but is a good idea and we'd like proactive support for it. 
- const retryAfterMillisHeader = responseHeaders?.get('retry-after-ms'); - if (retryAfterMillisHeader) { - const timeoutMs = parseFloat(retryAfterMillisHeader); - if (!Number.isNaN(timeoutMs)) { - timeoutMillis = timeoutMs; - } - } - - // About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After - const retryAfterHeader = responseHeaders?.get('retry-after'); - if (retryAfterHeader && !timeoutMillis) { - const timeoutSeconds = parseFloat(retryAfterHeader); - if (!Number.isNaN(timeoutSeconds)) { - timeoutMillis = timeoutSeconds * 1000; - } else { - timeoutMillis = Date.parse(retryAfterHeader) - Date.now(); - } - } - - // If the API asks us to wait a certain amount of time (and it's a reasonable amount), - // just do what it says, but otherwise calculate a default - if (!(timeoutMillis && 0 <= timeoutMillis && timeoutMillis < 60 * 1000)) { - const maxRetries = options.maxRetries ?? this.maxRetries; - timeoutMillis = this.calculateDefaultRetryTimeoutMillis(retriesRemaining, maxRetries); - } - await sleep(timeoutMillis); - - return this.makeRequest(options, retriesRemaining - 1, requestLogID); - } - - private calculateDefaultRetryTimeoutMillis(retriesRemaining: number, maxRetries: number): number { - const initialRetryDelay = 0.5; - const maxRetryDelay = 8.0; - - const numRetries = maxRetries - retriesRemaining; - - // Apply exponential backoff, but not more than the max. - const sleepSeconds = Math.min(initialRetryDelay * Math.pow(2, numRetries), maxRetryDelay); - - // Apply some jitter, take up to at most 25 percent of the retry time. 
- const jitter = 1 - Math.random() * 0.25; - - return sleepSeconds * jitter * 1000; - } - - buildRequest( - inputOptions: FinalRequestOptions, - { retryCount = 0 }: { retryCount?: number } = {}, - ): { req: FinalizedRequestInit; url: string; timeout: number } { - const options = { ...inputOptions }; - const { method, path, query, defaultBaseURL } = options; - - const url = this.buildURL(path!, query as Record, defaultBaseURL); - if ('timeout' in options) validatePositiveInteger('timeout', options.timeout); - options.timeout = options.timeout ?? this.timeout; - const { bodyHeaders, body } = this.buildBody({ options }); - const reqHeaders = this.buildHeaders({ options: inputOptions, method, bodyHeaders, retryCount }); - - const req: FinalizedRequestInit = { - method, - headers: reqHeaders, - ...(options.signal && { signal: options.signal }), - ...((globalThis as any).ReadableStream && - body instanceof (globalThis as any).ReadableStream && { duplex: 'half' }), - ...(body && { body }), - ...((this.fetchOptions as any) ?? {}), - ...((options.fetchOptions as any) ?? {}), - }; - - return { req, url, timeout: options.timeout }; - } - - private buildHeaders({ - options, - method, - bodyHeaders, - retryCount, - }: { - options: FinalRequestOptions; - method: HTTPMethod; - bodyHeaders: HeadersLike; - retryCount: number; - }): Headers { - let idempotencyHeaders: HeadersLike = {}; - if (this.idempotencyHeader && method !== 'get') { - if (!options.idempotencyKey) options.idempotencyKey = this.defaultIdempotencyKey(); - idempotencyHeaders[this.idempotencyHeader] = options.idempotencyKey; - } - - const headers = buildHeaders([ - idempotencyHeaders, - { - Accept: 'application/json', - 'User-Agent': this.getUserAgent(), - 'X-Stainless-Retry-Count': String(retryCount), - ...(options.timeout ? 
{ 'X-Stainless-Timeout': String(Math.trunc(options.timeout / 1000)) } : {}), - ...getPlatformHeaders(), - }, - this.authHeaders(options), - this._options.defaultHeaders, - bodyHeaders, - options.headers, - ]); - - this.validateHeaders(headers); - - return headers.values; - } - - private buildBody({ options: { body, headers: rawHeaders } }: { options: FinalRequestOptions }): { - bodyHeaders: HeadersLike; - body: BodyInit | undefined; - } { - if (!body) { - return { bodyHeaders: undefined, body: undefined }; - } - const headers = buildHeaders([rawHeaders]); - if ( - // Pass raw type verbatim - ArrayBuffer.isView(body) || - body instanceof ArrayBuffer || - body instanceof DataView || - (typeof body === 'string' && - // Preserve legacy string encoding behavior for now - headers.values.has('content-type')) || - // `Blob` is superset of `File` - body instanceof Blob || - // `FormData` -> `multipart/form-data` - body instanceof FormData || - // `URLSearchParams` -> `application/x-www-form-urlencoded` - body instanceof URLSearchParams || - // Send chunked stream (each chunk has own `length`) - ((globalThis as any).ReadableStream && body instanceof (globalThis as any).ReadableStream) - ) { - return { bodyHeaders: undefined, body: body as BodyInit }; - } else if ( - typeof body === 'object' && - (Symbol.asyncIterator in body || - (Symbol.iterator in body && 'next' in body && typeof body.next === 'function')) - ) { - return { bodyHeaders: undefined, body: Shims.ReadableStreamFrom(body as AsyncIterable) }; - } else { - return this.#encoder({ body, headers }); - } - } - - static LlamaStackClient = this; - static DEFAULT_TIMEOUT = 60000; // 1 minute - - static LlamaStackClientError = Errors.LlamaStackClientError; - static APIError = Errors.APIError; - static APIConnectionError = Errors.APIConnectionError; - static APIConnectionTimeoutError = Errors.APIConnectionTimeoutError; - static APIUserAbortError = Errors.APIUserAbortError; - static NotFoundError = Errors.NotFoundError; - 
static ConflictError = Errors.ConflictError; - static RateLimitError = Errors.RateLimitError; - static BadRequestError = Errors.BadRequestError; - static AuthenticationError = Errors.AuthenticationError; - static InternalServerError = Errors.InternalServerError; - static PermissionDeniedError = Errors.PermissionDeniedError; - static UnprocessableEntityError = Errors.UnprocessableEntityError; - - static toFile = Uploads.toFile; - - datasetio: API.Datasetio = new API.Datasetio(this); - inference: API.Inference = new API.Inference(this); - postTraining: API.PostTraining = new API.PostTraining(this); - agents: API.Agents = new API.Agents(this); - openai: API.OpenAI = new API.OpenAI(this); - files: API.Files = new API.Files(this); - eval: API.Eval = new API.Eval(this); - datasets: API.Datasets = new API.Datasets(this); - models: API.Models = new API.Models(this); - scoringFunctions: API.ScoringFunctions = new API.ScoringFunctions(this); - shields: API.Shields = new API.Shields(this); - telemetry: API.Telemetry = new API.Telemetry(this); - tools: API.Tools = new API.Tools(this); - toolgroups: API.Toolgroups = new API.Toolgroups(this); - vectorDBs: API.VectorDBs = new API.VectorDBs(this); - health: API.Health = new API.Health(this); - toolRuntime: API.ToolRuntime = new API.ToolRuntime(this); - vectorIo: API.VectorIo = new API.VectorIo(this); - providers: API.Providers = new API.Providers(this); - inspect: API.Inspect = new API.Inspect(this); - safety: API.Safety = new API.Safety(this); - scoring: API.Scoring = new API.Scoring(this); - syntheticDataGeneration: API.SyntheticDataGeneration = new API.SyntheticDataGeneration(this); - version: API.Version = new API.Version(this); -} -LlamaStackClient.Datasetio = Datasetio; -LlamaStackClient.Inference = Inference; -LlamaStackClient.PostTraining = PostTraining; -LlamaStackClient.Agents = Agents; -LlamaStackClient.OpenAI = OpenAI; -LlamaStackClient.Files = Files; -LlamaStackClient.Eval = Eval; -LlamaStackClient.Datasets = 
Datasets; -LlamaStackClient.Models = Models; -LlamaStackClient.ScoringFunctions = ScoringFunctions; -LlamaStackClient.Shields = Shields; -LlamaStackClient.Telemetry = Telemetry; -LlamaStackClient.Tools = Tools; -LlamaStackClient.Toolgroups = Toolgroups; -LlamaStackClient.VectorDBs = VectorDBs; -LlamaStackClient.Health = Health; -LlamaStackClient.ToolRuntime = ToolRuntime; -LlamaStackClient.VectorIo = VectorIo; -LlamaStackClient.Providers = Providers; -LlamaStackClient.Inspect = Inspect; -LlamaStackClient.Safety = Safety; -LlamaStackClient.Scoring = Scoring; -LlamaStackClient.SyntheticDataGeneration = SyntheticDataGeneration; -LlamaStackClient.Version = Version; -export declare namespace LlamaStackClient { - export type RequestOptions = Opts.RequestOptions; - - export { - Datasetio as Datasetio, - type DatasetioIterateRowsResponse as DatasetioIterateRowsResponse, - type DatasetioAppendRowsParams as DatasetioAppendRowsParams, - type DatasetioIterateRowsParams as DatasetioIterateRowsParams, - }; - - export { - Inference as Inference, - type ChatCompletionResponse as ChatCompletionResponse, - type CompletionMessage as CompletionMessage, - type CompletionResponse as CompletionResponse, - type InterleavedContent as InterleavedContent, - type InterleavedContentItem as InterleavedContentItem, - type Message as Message, - type MetricInResponse as MetricInResponse, - type ResponseFormat as ResponseFormat, - type SamplingParams as SamplingParams, - type SystemMessage as SystemMessage, - type TokenLogProbs as TokenLogProbs, - type ToolCall as ToolCall, - type ToolConfig as ToolConfig, - type ToolDefinition as ToolDefinition, - type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse, - type InferenceBatchCompletionResponse as InferenceBatchCompletionResponse, - type InferenceEmbeddingsResponse as InferenceEmbeddingsResponse, - type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams, - type InferenceBatchCompletionParams as 
InferenceBatchCompletionParams, - type InferenceChatCompletionParams as InferenceChatCompletionParams, - type InferenceCompletionParams as InferenceCompletionParams, - type InferenceEmbeddingsParams as InferenceEmbeddingsParams, - }; - - export { - PostTraining as PostTraining, - type PostTrainingJob as PostTrainingJob, - type TrainingConfig as TrainingConfig, - type PostTrainingListJobsResponse as PostTrainingListJobsResponse, - type PostTrainingFineTuneSupervisedParams as PostTrainingFineTuneSupervisedParams, - type PostTrainingOptimizePreferencesParams as PostTrainingOptimizePreferencesParams, - }; - - export { - Agents as Agents, - type Agent as Agent, - type AgentConfig as AgentConfig, - type AgentCreateResponse as AgentCreateResponse, - type AgentListResponse as AgentListResponse, - type AgentListSessionsResponse as AgentListSessionsResponse, - type AgentCreateParams as AgentCreateParams, - }; - - export { OpenAI as OpenAI }; - - export { - Files as Files, - type File as File, - type FileUpload as FileUpload, - type FileListResponse as FileListResponse, - type FileListInBucketResponse as FileListInBucketResponse, - type FileRetrieveParams as FileRetrieveParams, - type FileListParams as FileListParams, - type FileDeleteParams as FileDeleteParams, - type FileCreateUploadSessionParams as FileCreateUploadSessionParams, - }; - - export { Eval as Eval }; - - export { - Datasets as Datasets, - type DataSource as DataSource, - type Dataset as Dataset, - type DatasetListResponse as DatasetListResponse, - type DatasetCreateParams as DatasetCreateParams, - }; - - export { - Models as Models, - type Model as Model, - type ModelType as ModelType, - type ModelListResponse as ModelListResponse, - type ModelCreateParams as ModelCreateParams, - }; - - export { - ScoringFunctions as ScoringFunctions, - type AggregationFunctionType as AggregationFunctionType, - type ParamType as ParamType, - type ScoringFn as ScoringFn, - type ScoringFnParams as ScoringFnParams, - type 
ScoringFnParamsType as ScoringFnParamsType, - type ScoringFunctionListResponse as ScoringFunctionListResponse, - type ScoringFunctionCreateParams as ScoringFunctionCreateParams, - }; - - export { - Shields as Shields, - type Shield as Shield, - type ShieldListResponse as ShieldListResponse, - type ShieldCreateParams as ShieldCreateParams, - }; - - export { - Telemetry as Telemetry, - type EventType as EventType, - type StructuredLogType as StructuredLogType, - type TelemetryCreateEventParams as TelemetryCreateEventParams, - }; - - export { - Tools as Tools, - type Tool as Tool, - type ToolParameter as ToolParameter, - type ToolListResponse as ToolListResponse, - type ToolListParams as ToolListParams, - }; - - export { - Toolgroups as Toolgroups, - type ToolGroup as ToolGroup, - type ToolgroupListResponse as ToolgroupListResponse, - type ToolgroupRegisterParams as ToolgroupRegisterParams, - }; - - export { - VectorDBs as VectorDBs, - type VectorDB as VectorDB, - type VectorDBListResponse as VectorDBListResponse, - type VectorDBCreateParams as VectorDBCreateParams, - }; - - export { Health as Health, type HealthCheckResponse as HealthCheckResponse }; - - export { - ToolRuntime as ToolRuntime, - type ToolDef as ToolDef, - type URL as URL, - type ToolRuntimeInvokeToolResponse as ToolRuntimeInvokeToolResponse, - type ToolRuntimeListToolsResponse as ToolRuntimeListToolsResponse, - type ToolRuntimeInvokeToolParams as ToolRuntimeInvokeToolParams, - type ToolRuntimeListToolsParams as ToolRuntimeListToolsParams, - }; - - export { - VectorIo as VectorIo, - type VectorIoQueryResponse as VectorIoQueryResponse, - type VectorIoInsertParams as VectorIoInsertParams, - type VectorIoQueryParams as VectorIoQueryParams, - }; - - export { - Providers as Providers, - type ProviderInfo as ProviderInfo, - type ProviderListResponse as ProviderListResponse, - }; - - export { Inspect as Inspect, type InspectListRoutesResponse as InspectListRoutesResponse }; - - export { - Safety as Safety, - 
type SafetyViolation as SafetyViolation, - type SafetyRunShieldResponse as SafetyRunShieldResponse, - type SafetyRunShieldParams as SafetyRunShieldParams, - }; - - export { - Scoring as Scoring, - type ScoringScoreResponse as ScoringScoreResponse, - type ScoringScoreBatchResponse as ScoringScoreBatchResponse, - type ScoringScoreParams as ScoringScoreParams, - type ScoringScoreBatchParams as ScoringScoreBatchParams, - }; - - export { - SyntheticDataGeneration as SyntheticDataGeneration, - type SyntheticDataGenerationGenerateResponse as SyntheticDataGenerationGenerateResponse, - type SyntheticDataGenerationGenerateParams as SyntheticDataGenerationGenerateParams, - }; - - export { Version as Version, type VersionRetrieveResponse as VersionRetrieveResponse }; -} diff --git a/src/core.ts b/src/core.ts new file mode 100644 index 0000000..6cd9d53 --- /dev/null +++ b/src/core.ts @@ -0,0 +1,1251 @@ +import { VERSION } from './version'; +import { Stream } from './streaming'; +import { + LlamaStackClientError, + APIError, + APIConnectionError, + APIConnectionTimeoutError, + APIUserAbortError, +} from './error'; +import { + kind as shimsKind, + type Readable, + getDefaultAgent, + type Agent, + fetch, + type RequestInfo, + type RequestInit, + type Response, + type HeadersInit, + init, +} from './_shims/index'; + +// try running side effects outside of _shims/index to workaround https://github.com/vercel/next.js/issues/76881 +init(); + +export { type Response }; +import { BlobLike, isBlobLike, isMultipartBody } from './uploads'; +export { + maybeMultipartFormRequestOptions, + multipartFormRequestOptions, + createForm, + type Uploadable, +} from './uploads'; + +export type Fetch = (url: RequestInfo, init?: RequestInit) => Promise; + +/** + * An alias to the builtin `Array` type so we can + * easily alias it in import statements if there are name clashes. 
+ */ +type _Array = Array; + +/** + * An alias to the builtin `Record` type so we can + * easily alias it in import statements if there are name clashes. + */ +type _Record = Record; + +export type { _Array as Array, _Record as Record }; + +type PromiseOrValue = T | Promise; + +type APIResponseProps = { + response: Response; + options: FinalRequestOptions; + controller: AbortController; +}; + +async function defaultParseResponse(props: APIResponseProps): Promise { + const { response } = props; + if (props.options.stream) { + debug('response', response.status, response.url, response.headers, response.body); + + // Note: there is an invariant here that isn't represented in the type system + // that if you set `stream: true` the response type must also be `Stream` + + if (props.options.__streamClass) { + return props.options.__streamClass.fromSSEResponse(response, props.controller) as any; + } + + return Stream.fromSSEResponse(response, props.controller) as any; + } + + // fetch refuses to read the body when the status code is 204. + if (response.status === 204) { + return null as T; + } + + if (props.options.__binaryResponse) { + return response as unknown as T; + } + + const contentType = response.headers.get('content-type'); + const mediaType = contentType?.split(';')[0]?.trim(); + const isJSON = mediaType?.includes('application/json') || mediaType?.endsWith('+json'); + if (isJSON) { + const json = await response.json(); + + debug('response', response.status, response.url, response.headers, json); + + return json as T; + } + + const text = await response.text(); + debug('response', response.status, response.url, response.headers, text); + + // TODO handle blob, arraybuffer, other content types, etc. + return text as unknown as T; +} + +/** + * A subclass of `Promise` providing additional helper methods + * for interacting with the SDK. 
+ */ +export class APIPromise extends Promise { + private parsedPromise: Promise | undefined; + + constructor( + private responsePromise: Promise, + private parseResponse: (props: APIResponseProps) => PromiseOrValue = defaultParseResponse, + ) { + super((resolve) => { + // this is maybe a bit weird but this has to be a no-op to not implicitly + // parse the response body; instead .then, .catch, .finally are overridden + // to parse the response + resolve(null as any); + }); + } + + _thenUnwrap(transform: (data: T, props: APIResponseProps) => U): APIPromise { + return new APIPromise(this.responsePromise, async (props) => + transform(await this.parseResponse(props), props), + ); + } + + /** + * Gets the raw `Response` instance instead of parsing the response + * data. + * + * If you want to parse the response body but still get the `Response` + * instance, you can use {@link withResponse()}. + * + * 👋 Getting the wrong TypeScript type for `Response`? + * Try setting `"moduleResolution": "NodeNext"` if you can, + * or add one of these imports before your first `import … from 'llama-stack-client'`: + * - `import 'llama-stack-client/shims/node'` (if you're running on Node) + * - `import 'llama-stack-client/shims/web'` (otherwise) + */ + asResponse(): Promise { + return this.responsePromise.then((p) => p.response); + } + /** + * Gets the parsed response data and the raw `Response` instance. + * + * If you just want to get the raw `Response` instance without parsing it, + * you can use {@link asResponse()}. + * + * + * 👋 Getting the wrong TypeScript type for `Response`? 
+ * Try setting `"moduleResolution": "NodeNext"` if you can, + * or add one of these imports before your first `import … from 'llama-stack-client'`: + * - `import 'llama-stack-client/shims/node'` (if you're running on Node) + * - `import 'llama-stack-client/shims/web'` (otherwise) + */ + async withResponse(): Promise<{ data: T; response: Response }> { + const [data, response] = await Promise.all([this.parse(), this.asResponse()]); + return { data, response }; + } + + private parse(): Promise { + if (!this.parsedPromise) { + this.parsedPromise = this.responsePromise.then(this.parseResponse); + } + return this.parsedPromise; + } + + override then( + onfulfilled?: ((value: T) => TResult1 | PromiseLike) | undefined | null, + onrejected?: ((reason: any) => TResult2 | PromiseLike) | undefined | null, + ): Promise { + return this.parse().then(onfulfilled, onrejected); + } + + override catch( + onrejected?: ((reason: any) => TResult | PromiseLike) | undefined | null, + ): Promise { + return this.parse().catch(onrejected); + } + + override finally(onfinally?: (() => void) | undefined | null): Promise { + return this.parse().finally(onfinally); + } +} + +export abstract class APIClient { + baseURL: string; + #baseURLOverridden: boolean; + maxRetries: number; + timeout: number; + httpAgent: Agent | undefined; + + private fetch: Fetch; + protected idempotencyHeader?: string; + + constructor({ + baseURL, + baseURLOverridden, + maxRetries = 2, + timeout = 60000, // 1 minute + httpAgent, + fetch: overriddenFetch, + }: { + baseURL: string; + baseURLOverridden: boolean; + maxRetries?: number | undefined; + timeout: number | undefined; + httpAgent: Agent | undefined; + fetch: Fetch | undefined; + }) { + this.baseURL = baseURL; + this.#baseURLOverridden = baseURLOverridden; + this.maxRetries = validatePositiveInteger('maxRetries', maxRetries); + this.timeout = validatePositiveInteger('timeout', timeout); + this.httpAgent = httpAgent; + + this.fetch = overriddenFetch ?? 
fetch; + } + + protected authHeaders(opts: FinalRequestOptions): Headers { + return {}; + } + + /** + * Override this to add your own default headers, for example: + * + * { + * ...super.defaultHeaders(), + * Authorization: 'Bearer 123', + * } + */ + protected defaultHeaders(opts: FinalRequestOptions): Headers { + return { + Accept: 'application/json', + 'Content-Type': 'application/json', + 'User-Agent': this.getUserAgent(), + ...getPlatformHeaders(), + ...this.authHeaders(opts), + }; + } + + protected abstract defaultQuery(): DefaultQuery | undefined; + + /** + * Override this to add your own headers validation: + */ + protected validateHeaders(headers: Headers, customHeaders: Headers) {} + + protected defaultIdempotencyKey(): string { + return `stainless-node-retry-${uuid4()}`; + } + + get(path: string, opts?: PromiseOrValue>): APIPromise { + return this.methodRequest('get', path, opts); + } + + post(path: string, opts?: PromiseOrValue>): APIPromise { + return this.methodRequest('post', path, opts); + } + + patch(path: string, opts?: PromiseOrValue>): APIPromise { + return this.methodRequest('patch', path, opts); + } + + put(path: string, opts?: PromiseOrValue>): APIPromise { + return this.methodRequest('put', path, opts); + } + + delete(path: string, opts?: PromiseOrValue>): APIPromise { + return this.methodRequest('delete', path, opts); + } + + private methodRequest( + method: HTTPMethod, + path: string, + opts?: PromiseOrValue>, + ): APIPromise { + return this.request( + Promise.resolve(opts).then(async (opts) => { + const body = + opts && isBlobLike(opts?.body) ? new DataView(await opts.body.arrayBuffer()) + : opts?.body instanceof DataView ? opts.body + : opts?.body instanceof ArrayBuffer ? new DataView(opts.body) + : opts && ArrayBuffer.isView(opts?.body) ? 
new DataView(opts.body.buffer) + : opts?.body; + return { method, path, ...opts, body }; + }), + ); + } + + getAPIList = AbstractPage>( + path: string, + Page: new (...args: any[]) => PageClass, + opts?: RequestOptions, + ): PagePromise { + return this.requestAPIList(Page, { method: 'get', path, ...opts }); + } + + private calculateContentLength(body: unknown): string | null { + if (typeof body === 'string') { + if (typeof Buffer !== 'undefined') { + return Buffer.byteLength(body, 'utf8').toString(); + } + + if (typeof TextEncoder !== 'undefined') { + const encoder = new TextEncoder(); + const encoded = encoder.encode(body); + return encoded.length.toString(); + } + } else if (ArrayBuffer.isView(body)) { + return body.byteLength.toString(); + } + + return null; + } + + buildRequest( + inputOptions: FinalRequestOptions, + { retryCount = 0 }: { retryCount?: number } = {}, + ): { req: RequestInit; url: string; timeout: number } { + const options = { ...inputOptions }; + const { method, path, query, defaultBaseURL, headers: headers = {} } = options; + + const body = + ArrayBuffer.isView(options.body) || (options.__binaryRequest && typeof options.body === 'string') ? + options.body + : isMultipartBody(options.body) ? options.body.body + : options.body ? JSON.stringify(options.body, null, 2) + : null; + const contentLength = this.calculateContentLength(body); + + const url = this.buildURL(path!, query, defaultBaseURL); + if ('timeout' in options) validatePositiveInteger('timeout', options.timeout); + options.timeout = options.timeout ?? this.timeout; + const httpAgent = options.httpAgent ?? this.httpAgent ?? getDefaultAgent(url); + const minAgentTimeout = options.timeout + 1000; + if ( + typeof (httpAgent as any)?.options?.timeout === 'number' && + minAgentTimeout > ((httpAgent as any).options.timeout ?? 0) + ) { + // Allow any given request to bump our agent active socket timeout. 
+ // This may seem strange, but leaking active sockets should be rare and not particularly problematic, + // and without mutating agent we would need to create more of them. + // This tradeoff optimizes for performance. + (httpAgent as any).options.timeout = minAgentTimeout; + } + + if (this.idempotencyHeader && method !== 'get') { + if (!inputOptions.idempotencyKey) inputOptions.idempotencyKey = this.defaultIdempotencyKey(); + headers[this.idempotencyHeader] = inputOptions.idempotencyKey; + } + + const reqHeaders = this.buildHeaders({ options, headers, contentLength, retryCount }); + + const req: RequestInit = { + method, + ...(body && { body: body as any }), + headers: reqHeaders, + ...(httpAgent && { agent: httpAgent }), + // @ts-ignore node-fetch uses a custom AbortSignal type that is + // not compatible with standard web types + signal: options.signal ?? null, + }; + + return { req, url, timeout: options.timeout }; + } + + private buildHeaders({ + options, + headers, + contentLength, + retryCount, + }: { + options: FinalRequestOptions; + headers: Record; + contentLength: string | null | undefined; + retryCount: number; + }): Record { + const reqHeaders: Record = {}; + if (contentLength) { + reqHeaders['content-length'] = contentLength; + } + + const defaultHeaders = this.defaultHeaders(options); + applyHeadersMut(reqHeaders, defaultHeaders); + applyHeadersMut(reqHeaders, headers); + + // let builtin fetch set the Content-Type for multipart bodies + if (isMultipartBody(options.body) && shimsKind !== 'node') { + delete reqHeaders['content-type']; + } + + // Don't set theses headers if they were already set or removed through default headers or by the caller. + // We check `defaultHeaders` and `headers`, which can contain nulls, instead of `reqHeaders` to account + // for the removal case. 
+ if ( + getHeader(defaultHeaders, 'x-stainless-retry-count') === undefined && + getHeader(headers, 'x-stainless-retry-count') === undefined + ) { + reqHeaders['x-stainless-retry-count'] = String(retryCount); + } + if ( + getHeader(defaultHeaders, 'x-stainless-timeout') === undefined && + getHeader(headers, 'x-stainless-timeout') === undefined && + options.timeout + ) { + reqHeaders['x-stainless-timeout'] = String(Math.trunc(options.timeout / 1000)); + } + + this.validateHeaders(reqHeaders, headers); + + return reqHeaders; + } + + /** + * Used as a callback for mutating the given `FinalRequestOptions` object. + */ + protected async prepareOptions(options: FinalRequestOptions): Promise {} + + /** + * Used as a callback for mutating the given `RequestInit` object. + * + * This is useful for cases where you want to add certain headers based off of + * the request properties, e.g. `method` or `url`. + */ + protected async prepareRequest( + request: RequestInit, + { url, options }: { url: string; options: FinalRequestOptions }, + ): Promise {} + + protected parseHeaders(headers: HeadersInit | null | undefined): Record { + return ( + !headers ? {} + : Symbol.iterator in headers ? + Object.fromEntries(Array.from(headers as Iterable).map((header) => [...header])) + : { ...(headers as any as Record) } + ); + } + + protected makeStatusError( + status: number | undefined, + error: Object | undefined, + message: string | undefined, + headers: Headers | undefined, + ): APIError { + return APIError.generate(status, error, message, headers); + } + + request( + options: PromiseOrValue>, + remainingRetries: number | null = null, + ): APIPromise { + return new APIPromise(this.makeRequest(options, remainingRetries)); + } + + private async makeRequest( + optionsInput: PromiseOrValue>, + retriesRemaining: number | null, + ): Promise { + const options = await optionsInput; + const maxRetries = options.maxRetries ?? 
this.maxRetries; + if (retriesRemaining == null) { + retriesRemaining = maxRetries; + } + + await this.prepareOptions(options); + + const { req, url, timeout } = this.buildRequest(options, { retryCount: maxRetries - retriesRemaining }); + + await this.prepareRequest(req, { url, options }); + + debug('request', url, options, req.headers); + + if (options.signal?.aborted) { + throw new APIUserAbortError(); + } + + const controller = new AbortController(); + const response = await this.fetchWithTimeout(url, req, timeout, controller).catch(castToError); + + if (response instanceof Error) { + if (options.signal?.aborted) { + throw new APIUserAbortError(); + } + if (retriesRemaining) { + return this.retryRequest(options, retriesRemaining); + } + if (response.name === 'AbortError') { + throw new APIConnectionTimeoutError(); + } + throw new APIConnectionError({ cause: response }); + } + + const responseHeaders = createResponseHeaders(response.headers); + + if (!response.ok) { + if (retriesRemaining && this.shouldRetry(response)) { + const retryMessage = `retrying, ${retriesRemaining} attempts remaining`; + debug(`response (error; ${retryMessage})`, response.status, url, responseHeaders); + return this.retryRequest(options, retriesRemaining, responseHeaders); + } + + const errText = await response.text().catch((e) => castToError(e).message); + const errJSON = safeJSON(errText); + const errMessage = errJSON ? undefined : errText; + const retryMessage = retriesRemaining ? 
`(error; no more retries left)` : `(error; not retryable)`; + + debug(`response (error; ${retryMessage})`, response.status, url, responseHeaders, errMessage); + + const err = this.makeStatusError(response.status, errJSON, errMessage, responseHeaders); + throw err; + } + + return { response, options, controller }; + } + + requestAPIList = AbstractPage>( + Page: new (...args: ConstructorParameters) => PageClass, + options: FinalRequestOptions, + ): PagePromise { + const request = this.makeRequest(options, null); + return new PagePromise(this, request, Page); + } + + buildURL(path: string, query: Req | null | undefined, defaultBaseURL?: string | undefined): string { + const baseURL = (!this.#baseURLOverridden && defaultBaseURL) || this.baseURL; + const url = + isAbsoluteURL(path) ? + new URL(path) + : new URL(baseURL + (baseURL.endsWith('/') && path.startsWith('/') ? path.slice(1) : path)); + + const defaultQuery = this.defaultQuery(); + if (!isEmptyObj(defaultQuery)) { + query = { ...defaultQuery, ...query } as Req; + } + + if (typeof query === 'object' && query && !Array.isArray(query)) { + url.search = this.stringifyQuery(query as Record); + } + + return url.toString(); + } + + protected stringifyQuery(query: Record): string { + return Object.entries(query) + .filter(([_, value]) => typeof value !== 'undefined') + .map(([key, value]) => { + if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') { + return `${encodeURIComponent(key)}=${encodeURIComponent(value)}`; + } + if (value === null) { + return `${encodeURIComponent(key)}=`; + } + throw new LlamaStackClientError( + `Cannot stringify type ${typeof value}; Expected string, number, boolean, or null. If you need to pass nested query parameters, you can manually encode them, e.g. 
{ query: { 'foo[key1]': value1, 'foo[key2]': value2 } }, and please open a GitHub issue requesting better support for your use case.`, + ); + }) + .join('&'); + } + + async fetchWithTimeout( + url: RequestInfo, + init: RequestInit | undefined, + ms: number, + controller: AbortController, + ): Promise { + const { signal, ...options } = init || {}; + if (signal) signal.addEventListener('abort', () => controller.abort()); + + const timeout = setTimeout(() => controller.abort(), ms); + + const fetchOptions = { + signal: controller.signal as any, + ...options, + }; + if (fetchOptions.method) { + // Custom methods like 'patch' need to be uppercased + // See https://github.com/nodejs/undici/issues/2294 + fetchOptions.method = fetchOptions.method.toUpperCase(); + } + + return ( + // use undefined this binding; fetch errors if bound to something else in browser/cloudflare + this.fetch.call(undefined, url, fetchOptions).finally(() => { + clearTimeout(timeout); + }) + ); + } + + private shouldRetry(response: Response): boolean { + // Note this is not a standard header. + const shouldRetryHeader = response.headers.get('x-should-retry'); + + // If the server explicitly says whether or not to retry, obey. + if (shouldRetryHeader === 'true') return true; + if (shouldRetryHeader === 'false') return false; + + // Retry on request timeouts. + if (response.status === 408) return true; + + // Retry on lock timeouts. + if (response.status === 409) return true; + + // Retry on rate limits. + if (response.status === 429) return true; + + // Retry internal errors. + if (response.status >= 500) return true; + + return false; + } + + private async retryRequest( + options: FinalRequestOptions, + retriesRemaining: number, + responseHeaders?: Headers | undefined, + ): Promise { + let timeoutMillis: number | undefined; + + // Note the `retry-after-ms` header may not be standard, but is a good idea and we'd like proactive support for it. 
+ const retryAfterMillisHeader = responseHeaders?.['retry-after-ms']; + if (retryAfterMillisHeader) { + const timeoutMs = parseFloat(retryAfterMillisHeader); + if (!Number.isNaN(timeoutMs)) { + timeoutMillis = timeoutMs; + } + } + + // About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After + const retryAfterHeader = responseHeaders?.['retry-after']; + if (retryAfterHeader && !timeoutMillis) { + const timeoutSeconds = parseFloat(retryAfterHeader); + if (!Number.isNaN(timeoutSeconds)) { + timeoutMillis = timeoutSeconds * 1000; + } else { + timeoutMillis = Date.parse(retryAfterHeader) - Date.now(); + } + } + + // If the API asks us to wait a certain amount of time (and it's a reasonable amount), + // just do what it says, but otherwise calculate a default + if (!(timeoutMillis && 0 <= timeoutMillis && timeoutMillis < 60 * 1000)) { + const maxRetries = options.maxRetries ?? this.maxRetries; + timeoutMillis = this.calculateDefaultRetryTimeoutMillis(retriesRemaining, maxRetries); + } + await sleep(timeoutMillis); + + return this.makeRequest(options, retriesRemaining - 1); + } + + private calculateDefaultRetryTimeoutMillis(retriesRemaining: number, maxRetries: number): number { + const initialRetryDelay = 0.5; + const maxRetryDelay = 8.0; + + const numRetries = maxRetries - retriesRemaining; + + // Apply exponential backoff, but not more than the max. + const sleepSeconds = Math.min(initialRetryDelay * Math.pow(2, numRetries), maxRetryDelay); + + // Apply some jitter, take up to at most 25 percent of the retry time. 
+ const jitter = 1 - Math.random() * 0.25; + + return sleepSeconds * jitter * 1000; + } + + private getUserAgent(): string { + return `${this.constructor.name}/JS ${VERSION}`; + } +} + +export type PageInfo = { url: URL } | { params: Record | null }; + +export abstract class AbstractPage implements AsyncIterable { + #client: APIClient; + protected options: FinalRequestOptions; + + protected response: Response; + protected body: unknown; + + constructor(client: APIClient, response: Response, body: unknown, options: FinalRequestOptions) { + this.#client = client; + this.options = options; + this.response = response; + this.body = body; + } + + /** + * @deprecated Use nextPageInfo instead + */ + abstract nextPageParams(): Partial> | null; + abstract nextPageInfo(): PageInfo | null; + + abstract getPaginatedItems(): Item[]; + + hasNextPage(): boolean { + const items = this.getPaginatedItems(); + if (!items.length) return false; + return this.nextPageInfo() != null; + } + + async getNextPage(): Promise { + const nextInfo = this.nextPageInfo(); + if (!nextInfo) { + throw new LlamaStackClientError( + 'No next page expected; please check `.hasNextPage()` before calling `.getNextPage()`.', + ); + } + const nextOptions = { ...this.options }; + if ('params' in nextInfo && typeof nextOptions.query === 'object') { + nextOptions.query = { ...nextOptions.query, ...nextInfo.params }; + } else if ('url' in nextInfo) { + const params = [...Object.entries(nextOptions.query || {}), ...nextInfo.url.searchParams.entries()]; + for (const [key, value] of params) { + nextInfo.url.searchParams.set(key, value as any); + } + nextOptions.query = undefined; + nextOptions.path = nextInfo.url.toString(); + } + return await this.#client.requestAPIList(this.constructor as any, nextOptions); + } + + async *iterPages(): AsyncGenerator { + // eslint-disable-next-line @typescript-eslint/no-this-alias + let page: this = this; + yield page; + while (page.hasNextPage()) { + page = await 
page.getNextPage(); + yield page; + } + } + + async *[Symbol.asyncIterator](): AsyncGenerator { + for await (const page of this.iterPages()) { + for (const item of page.getPaginatedItems()) { + yield item; + } + } + } +} + +/** + * This subclass of Promise will resolve to an instantiated Page once the request completes. + * + * It also implements AsyncIterable to allow auto-paginating iteration on an unawaited list call, eg: + * + * for await (const item of client.items.list()) { + * console.log(item) + * } + */ +export class PagePromise< + PageClass extends AbstractPage, + Item = ReturnType[number], + > + extends APIPromise + implements AsyncIterable +{ + constructor( + client: APIClient, + request: Promise, + Page: new (...args: ConstructorParameters) => PageClass, + ) { + super( + request, + async (props) => new Page(client, props.response, await defaultParseResponse(props), props.options), + ); + } + + /** + * Allow auto-paginating iteration on an unawaited list call, eg: + * + * for await (const item of client.items.list()) { + * console.log(item) + * } + */ + async *[Symbol.asyncIterator](): AsyncGenerator { + const page = await this; + for await (const item of page) { + yield item; + } + } +} + +export const createResponseHeaders = ( + headers: Awaited>['headers'], +): Record => { + return new Proxy( + Object.fromEntries( + // @ts-ignore + headers.entries(), + ), + { + get(target, name) { + const key = name.toString(); + return target[key.toLowerCase()] || target[key]; + }, + }, + ); +}; + +type HTTPMethod = 'get' | 'post' | 'put' | 'patch' | 'delete'; + +export type RequestClient = { fetch: Fetch }; +export type Headers = Record; +export type DefaultQuery = Record; +export type KeysEnum = { [P in keyof Required]: true }; + +export type RequestOptions< + Req = unknown | Record | Readable | BlobLike | ArrayBufferView | ArrayBuffer, +> = { + method?: HTTPMethod; + path?: string; + query?: Req | undefined; + body?: Req | null | undefined; + headers?: Headers | 
undefined; + defaultBaseURL?: string | undefined; + + maxRetries?: number; + stream?: boolean | undefined; + timeout?: number; + httpAgent?: Agent; + signal?: AbortSignal | undefined | null; + idempotencyKey?: string; + + __binaryRequest?: boolean | undefined; + __binaryResponse?: boolean | undefined; + __streamClass?: typeof Stream; +}; + +// This is required so that we can determine if a given object matches the RequestOptions +// type at runtime. While this requires duplication, it is enforced by the TypeScript +// compiler such that any missing / extraneous keys will cause an error. +const requestOptionsKeys: KeysEnum = { + method: true, + path: true, + query: true, + body: true, + headers: true, + defaultBaseURL: true, + + maxRetries: true, + stream: true, + timeout: true, + httpAgent: true, + signal: true, + idempotencyKey: true, + + __binaryRequest: true, + __binaryResponse: true, + __streamClass: true, +}; + +export const isRequestOptions = (obj: unknown): obj is RequestOptions => { + return ( + typeof obj === 'object' && + obj !== null && + !isEmptyObj(obj) && + Object.keys(obj).every((k) => hasOwn(requestOptionsKeys, k)) + ); +}; + +export type FinalRequestOptions | Readable | DataView> = + RequestOptions & { + method: HTTPMethod; + path: string; + }; + +declare const Deno: any; +declare const EdgeRuntime: any; +type Arch = 'x32' | 'x64' | 'arm' | 'arm64' | `other:${string}` | 'unknown'; +type PlatformName = + | 'MacOS' + | 'Linux' + | 'Windows' + | 'FreeBSD' + | 'OpenBSD' + | 'iOS' + | 'Android' + | `Other:${string}` + | 'Unknown'; +type Browser = 'ie' | 'edge' | 'chrome' | 'firefox' | 'safari'; +type PlatformProperties = { + 'X-Stainless-Lang': 'js'; + 'X-Stainless-Package-Version': string; + 'X-Stainless-OS': PlatformName; + 'X-Stainless-Arch': Arch; + 'X-Stainless-Runtime': 'node' | 'deno' | 'edge' | `browser:${Browser}` | 'unknown'; + 'X-Stainless-Runtime-Version': string; +}; +const getPlatformProperties = (): PlatformProperties => { + if (typeof 
Deno !== 'undefined' && Deno.build != null) { + return { + 'X-Stainless-Lang': 'js', + 'X-Stainless-Package-Version': VERSION, + 'X-Stainless-OS': normalizePlatform(Deno.build.os), + 'X-Stainless-Arch': normalizeArch(Deno.build.arch), + 'X-Stainless-Runtime': 'deno', + 'X-Stainless-Runtime-Version': + typeof Deno.version === 'string' ? Deno.version : Deno.version?.deno ?? 'unknown', + }; + } + if (typeof EdgeRuntime !== 'undefined') { + return { + 'X-Stainless-Lang': 'js', + 'X-Stainless-Package-Version': VERSION, + 'X-Stainless-OS': 'Unknown', + 'X-Stainless-Arch': `other:${EdgeRuntime}`, + 'X-Stainless-Runtime': 'edge', + 'X-Stainless-Runtime-Version': process.version, + }; + } + // Check if Node.js + if (Object.prototype.toString.call(typeof process !== 'undefined' ? process : 0) === '[object process]') { + return { + 'X-Stainless-Lang': 'js', + 'X-Stainless-Package-Version': VERSION, + 'X-Stainless-OS': normalizePlatform(process.platform), + 'X-Stainless-Arch': normalizeArch(process.arch), + 'X-Stainless-Runtime': 'node', + 'X-Stainless-Runtime-Version': process.version, + }; + } + + const browserInfo = getBrowserInfo(); + if (browserInfo) { + return { + 'X-Stainless-Lang': 'js', + 'X-Stainless-Package-Version': VERSION, + 'X-Stainless-OS': 'Unknown', + 'X-Stainless-Arch': 'unknown', + 'X-Stainless-Runtime': `browser:${browserInfo.browser}`, + 'X-Stainless-Runtime-Version': browserInfo.version, + }; + } + + // TODO add support for Cloudflare workers, etc. 
+ return { + 'X-Stainless-Lang': 'js', + 'X-Stainless-Package-Version': VERSION, + 'X-Stainless-OS': 'Unknown', + 'X-Stainless-Arch': 'unknown', + 'X-Stainless-Runtime': 'unknown', + 'X-Stainless-Runtime-Version': 'unknown', + }; +}; + +type BrowserInfo = { + browser: Browser; + version: string; +}; + +declare const navigator: { userAgent: string } | undefined; + +// Note: modified from https://github.com/JS-DevTools/host-environment/blob/b1ab79ecde37db5d6e163c050e54fe7d287d7c92/src/isomorphic.browser.ts +function getBrowserInfo(): BrowserInfo | null { + if (typeof navigator === 'undefined' || !navigator) { + return null; + } + + // NOTE: The order matters here! + const browserPatterns = [ + { key: 'edge' as const, pattern: /Edge(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, + { key: 'ie' as const, pattern: /MSIE(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, + { key: 'ie' as const, pattern: /Trident(?:.*rv\:(\d+)\.(\d+)(?:\.(\d+))?)?/ }, + { key: 'chrome' as const, pattern: /Chrome(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, + { key: 'firefox' as const, pattern: /Firefox(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, + { key: 'safari' as const, pattern: /(?:Version\W+(\d+)\.(\d+)(?:\.(\d+))?)?(?:\W+Mobile\S*)?\W+Safari/ }, + ]; + + // Find the FIRST matching browser + for (const { key, pattern } of browserPatterns) { + const match = pattern.exec(navigator.userAgent); + if (match) { + const major = match[1] || 0; + const minor = match[2] || 0; + const patch = match[3] || 0; + + return { browser: key, version: `${major}.${minor}.${patch}` }; + } + } + + return null; +} + +const normalizeArch = (arch: string): Arch => { + // Node docs: + // - https://nodejs.org/api/process.html#processarch + // Deno docs: + // - https://doc.deno.land/deno/stable/~/Deno.build + if (arch === 'x32') return 'x32'; + if (arch === 'x86_64' || arch === 'x64') return 'x64'; + if (arch === 'arm') return 'arm'; + if (arch === 'aarch64' || arch === 'arm64') return 'arm64'; + if (arch) return `other:${arch}`; + return 'unknown'; +}; + 
+const normalizePlatform = (platform: string): PlatformName => { + // Node platforms: + // - https://nodejs.org/api/process.html#processplatform + // Deno platforms: + // - https://doc.deno.land/deno/stable/~/Deno.build + // - https://github.com/denoland/deno/issues/14799 + + platform = platform.toLowerCase(); + + // NOTE: this iOS check is untested and may not work + // Node does not work natively on IOS, there is a fork at + // https://github.com/nodejs-mobile/nodejs-mobile + // however it is unknown at the time of writing how to detect if it is running + if (platform.includes('ios')) return 'iOS'; + if (platform === 'android') return 'Android'; + if (platform === 'darwin') return 'MacOS'; + if (platform === 'win32') return 'Windows'; + if (platform === 'freebsd') return 'FreeBSD'; + if (platform === 'openbsd') return 'OpenBSD'; + if (platform === 'linux') return 'Linux'; + if (platform) return `Other:${platform}`; + return 'Unknown'; +}; + +let _platformHeaders: PlatformProperties; +const getPlatformHeaders = () => { + return (_platformHeaders ??= getPlatformProperties()); +}; + +export const safeJSON = (text: string) => { + try { + return JSON.parse(text); + } catch (err) { + return undefined; + } +}; + +// https://url.spec.whatwg.org/#url-scheme-string +const startsWithSchemeRegexp = /^[a-z][a-z0-9+.-]*:/i; +const isAbsoluteURL = (url: string): boolean => { + return startsWithSchemeRegexp.test(url); +}; + +export const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)); + +const validatePositiveInteger = (name: string, n: unknown): number => { + if (typeof n !== 'number' || !Number.isInteger(n)) { + throw new LlamaStackClientError(`${name} must be an integer`); + } + if (n < 0) { + throw new LlamaStackClientError(`${name} must be a positive integer`); + } + return n; +}; + +export const castToError = (err: any): Error => { + if (err instanceof Error) return err; + if (typeof err === 'object' && err !== null) { + try { + return new 
Error(JSON.stringify(err)); + } catch {} + } + return new Error(err); +}; + +export const ensurePresent = (value: T | null | undefined): T => { + if (value == null) + throw new LlamaStackClientError(`Expected a value to be given but received ${value} instead.`); + return value; +}; + +/** + * Read an environment variable. + * + * Trims beginning and trailing whitespace. + * + * Will return undefined if the environment variable doesn't exist or cannot be accessed. + */ +export const readEnv = (env: string): string | undefined => { + if (typeof process !== 'undefined') { + return process.env?.[env]?.trim() ?? undefined; + } + if (typeof Deno !== 'undefined') { + return Deno.env?.get?.(env)?.trim(); + } + return undefined; +}; + +export const coerceInteger = (value: unknown): number => { + if (typeof value === 'number') return Math.round(value); + if (typeof value === 'string') return parseInt(value, 10); + + throw new LlamaStackClientError(`Could not coerce ${value} (type: ${typeof value}) into a number`); +}; + +export const coerceFloat = (value: unknown): number => { + if (typeof value === 'number') return value; + if (typeof value === 'string') return parseFloat(value); + + throw new LlamaStackClientError(`Could not coerce ${value} (type: ${typeof value}) into a number`); +}; + +export const coerceBoolean = (value: unknown): boolean => { + if (typeof value === 'boolean') return value; + if (typeof value === 'string') return value === 'true'; + return Boolean(value); +}; + +export const maybeCoerceInteger = (value: unknown): number | undefined => { + if (value === undefined) { + return undefined; + } + return coerceInteger(value); +}; + +export const maybeCoerceFloat = (value: unknown): number | undefined => { + if (value === undefined) { + return undefined; + } + return coerceFloat(value); +}; + +export const maybeCoerceBoolean = (value: unknown): boolean | undefined => { + if (value === undefined) { + return undefined; + } + return coerceBoolean(value); +}; + +// 
https://stackoverflow.com/a/34491287 +export function isEmptyObj(obj: Object | null | undefined): boolean { + if (!obj) return true; + for (const _k in obj) return false; + return true; +} + +// https://eslint.org/docs/latest/rules/no-prototype-builtins +export function hasOwn(obj: Object, key: string): boolean { + return Object.prototype.hasOwnProperty.call(obj, key); +} + +/** + * Copies headers from "newHeaders" onto "targetHeaders", + * using lower-case for all properties, + * ignoring any keys with undefined values, + * and deleting any keys with null values. + */ +function applyHeadersMut(targetHeaders: Headers, newHeaders: Headers): void { + for (const k in newHeaders) { + if (!hasOwn(newHeaders, k)) continue; + const lowerKey = k.toLowerCase(); + if (!lowerKey) continue; + + const val = newHeaders[k]; + + if (val === null) { + delete targetHeaders[lowerKey]; + } else if (val !== undefined) { + targetHeaders[lowerKey] = val; + } + } +} + +export function debug(action: string, ...args: any[]) { + if (typeof process !== 'undefined' && process?.env?.['DEBUG'] === 'true') { + console.log(`LlamaStackClient:DEBUG:${action}`, ...args); + } +} + +/** + * https://stackoverflow.com/a/2117523 + */ +const uuid4 = () => { + return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { + const r = (Math.random() * 16) | 0; + const v = c === 'x' ? 
r : (r & 0x3) | 0x8; + return v.toString(16); + }); +}; + +export const isRunningInBrowser = () => { + return ( + // @ts-ignore + typeof window !== 'undefined' && + // @ts-ignore + typeof window.document !== 'undefined' && + // @ts-ignore + typeof navigator !== 'undefined' + ); +}; + +export interface HeadersProtocol { + get: (header: string) => string | null | undefined; +} +export type HeadersLike = Record | HeadersProtocol; + +export const isHeadersProtocol = (headers: any): headers is HeadersProtocol => { + return typeof headers?.get === 'function'; +}; + +export const getRequiredHeader = (headers: HeadersLike | Headers, header: string): string => { + const foundHeader = getHeader(headers, header); + if (foundHeader === undefined) { + throw new Error(`Could not find ${header} header`); + } + return foundHeader; +}; + +export const getHeader = (headers: HeadersLike | Headers, header: string): string | undefined => { + const lowerCasedHeader = header.toLowerCase(); + if (isHeadersProtocol(headers)) { + // to deal with the case where the header looks like Stainless-Event-Id + const intercapsHeader = + header[0]?.toUpperCase() + + header.substring(1).replace(/([^\w])(\w)/g, (_m, g1, g2) => g1 + g2.toUpperCase()); + for (const key of [header, lowerCasedHeader, header.toUpperCase(), intercapsHeader]) { + const value = headers.get(key); + if (value) { + return value; + } + } + } + + for (const [key, value] of Object.entries(headers)) { + if (key.toLowerCase() === lowerCasedHeader) { + if (Array.isArray(value)) { + if (value.length <= 1) return value[0]; + console.warn(`Received ${value.length} entries for the ${header} header, using the first entry.`); + return value[0]; + } + return value; + } + } + + return undefined; +}; + +/** + * Encodes a string to Base64 format. 
+ */ +export const toBase64 = (str: string | null | undefined): string => { + if (!str) return ''; + if (typeof Buffer !== 'undefined') { + return Buffer.from(str).toString('base64'); + } + + if (typeof btoa !== 'undefined') { + return btoa(str); + } + + throw new LlamaStackClientError('Cannot generate b64 string; Expected `Buffer` or `btoa` to be defined'); +}; + +export function isObj(obj: unknown): obj is Record { + return obj != null && typeof obj === 'object' && !Array.isArray(obj); +} diff --git a/src/core/README.md b/src/core/README.md deleted file mode 100644 index 485fce8..0000000 --- a/src/core/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `core` - -This directory holds public modules implementing non-resource-specific SDK functionality. diff --git a/src/core/api-promise.ts b/src/core/api-promise.ts deleted file mode 100644 index 7bf720b..0000000 --- a/src/core/api-promise.ts +++ /dev/null @@ -1,92 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { type LlamaStackClient } from '../client'; - -import { type PromiseOrValue } from '../internal/types'; -import { APIResponseProps, defaultParseResponse } from '../internal/parse'; - -/** - * A subclass of `Promise` providing additional helper methods - * for interacting with the SDK. 
- */ -export class APIPromise extends Promise { - private parsedPromise: Promise | undefined; - #client: LlamaStackClient; - - constructor( - client: LlamaStackClient, - private responsePromise: Promise, - private parseResponse: ( - client: LlamaStackClient, - props: APIResponseProps, - ) => PromiseOrValue = defaultParseResponse, - ) { - super((resolve) => { - // this is maybe a bit weird but this has to be a no-op to not implicitly - // parse the response body; instead .then, .catch, .finally are overridden - // to parse the response - resolve(null as any); - }); - this.#client = client; - } - - _thenUnwrap(transform: (data: T, props: APIResponseProps) => U): APIPromise { - return new APIPromise(this.#client, this.responsePromise, async (client, props) => - transform(await this.parseResponse(client, props), props), - ); - } - - /** - * Gets the raw `Response` instance instead of parsing the response - * data. - * - * If you want to parse the response body but still get the `Response` - * instance, you can use {@link withResponse()}. - * - * 👋 Getting the wrong TypeScript type for `Response`? - * Try setting `"moduleResolution": "NodeNext"` or add `"lib": ["DOM"]` - * to your `tsconfig.json`. - */ - asResponse(): Promise { - return this.responsePromise.then((p) => p.response); - } - - /** - * Gets the parsed response data and the raw `Response` instance. - * - * If you just want to get the raw `Response` instance without parsing it, - * you can use {@link asResponse()}. - * - * 👋 Getting the wrong TypeScript type for `Response`? - * Try setting `"moduleResolution": "NodeNext"` or add `"lib": ["DOM"]` - * to your `tsconfig.json`. 
- */ - async withResponse(): Promise<{ data: T; response: Response }> { - const [data, response] = await Promise.all([this.parse(), this.asResponse()]); - return { data, response }; - } - - private parse(): Promise { - if (!this.parsedPromise) { - this.parsedPromise = this.responsePromise.then((data) => this.parseResponse(this.#client, data)); - } - return this.parsedPromise; - } - - override then( - onfulfilled?: ((value: T) => TResult1 | PromiseLike) | undefined | null, - onrejected?: ((reason: any) => TResult2 | PromiseLike) | undefined | null, - ): Promise { - return this.parse().then(onfulfilled, onrejected); - } - - override catch( - onrejected?: ((reason: any) => TResult | PromiseLike) | undefined | null, - ): Promise { - return this.parse().catch(onrejected); - } - - override finally(onfinally?: (() => void) | undefined | null): Promise { - return this.parse().finally(onfinally); - } -} diff --git a/src/core/error.ts b/src/core/error.ts deleted file mode 100644 index a54d849..0000000 --- a/src/core/error.ts +++ /dev/null @@ -1,130 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { castToError } from '../internal/errors'; - -export class LlamaStackClientError extends Error {} - -export class APIError< - TStatus extends number | undefined = number | undefined, - THeaders extends Headers | undefined = Headers | undefined, - TError extends Object | undefined = Object | undefined, -> extends LlamaStackClientError { - /** HTTP status for the response that caused the error */ - readonly status: TStatus; - /** HTTP headers for the response that caused the error */ - readonly headers: THeaders; - /** JSON body of the response that caused the error */ - readonly error: TError; - - constructor(status: TStatus, error: TError, message: string | undefined, headers: THeaders) { - super(`${APIError.makeMessage(status, error, message)}`); - this.status = status; - this.headers = headers; - this.error = error; - } - - private static makeMessage(status: number | undefined, error: any, message: string | undefined) { - const msg = - error?.message ? - typeof error.message === 'string' ? - error.message - : JSON.stringify(error.message) - : error ? 
JSON.stringify(error) - : message; - - if (status && msg) { - return `${status} ${msg}`; - } - if (status) { - return `${status} status code (no body)`; - } - if (msg) { - return msg; - } - return '(no status code or body)'; - } - - static generate( - status: number | undefined, - errorResponse: Object | undefined, - message: string | undefined, - headers: Headers | undefined, - ): APIError { - if (!status || !headers) { - return new APIConnectionError({ message, cause: castToError(errorResponse) }); - } - - const error = errorResponse as Record; - - if (status === 400) { - return new BadRequestError(status, error, message, headers); - } - - if (status === 401) { - return new AuthenticationError(status, error, message, headers); - } - - if (status === 403) { - return new PermissionDeniedError(status, error, message, headers); - } - - if (status === 404) { - return new NotFoundError(status, error, message, headers); - } - - if (status === 409) { - return new ConflictError(status, error, message, headers); - } - - if (status === 422) { - return new UnprocessableEntityError(status, error, message, headers); - } - - if (status === 429) { - return new RateLimitError(status, error, message, headers); - } - - if (status >= 500) { - return new InternalServerError(status, error, message, headers); - } - - return new APIError(status, error, message, headers); - } -} - -export class APIUserAbortError extends APIError { - constructor({ message }: { message?: string } = {}) { - super(undefined, undefined, message || 'Request was aborted.', undefined); - } -} - -export class APIConnectionError extends APIError { - constructor({ message, cause }: { message?: string | undefined; cause?: Error | undefined }) { - super(undefined, undefined, message || 'Connection error.', undefined); - // in some environments the 'cause' property is already declared - // @ts-ignore - if (cause) this.cause = cause; - } -} - -export class APIConnectionTimeoutError extends APIConnectionError { - 
constructor({ message }: { message?: string } = {}) { - super({ message: message ?? 'Request timed out.' }); - } -} - -export class BadRequestError extends APIError<400, Headers> {} - -export class AuthenticationError extends APIError<401, Headers> {} - -export class PermissionDeniedError extends APIError<403, Headers> {} - -export class NotFoundError extends APIError<404, Headers> {} - -export class ConflictError extends APIError<409, Headers> {} - -export class UnprocessableEntityError extends APIError<422, Headers> {} - -export class RateLimitError extends APIError<429, Headers> {} - -export class InternalServerError extends APIError {} diff --git a/src/core/resource.ts b/src/core/resource.ts deleted file mode 100644 index 7c7f947..0000000 --- a/src/core/resource.ts +++ /dev/null @@ -1,11 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import type { LlamaStackClient } from '../client'; - -export abstract class APIResource { - protected _client: LlamaStackClient; - - constructor(client: LlamaStackClient) { - this._client = client; - } -} diff --git a/src/core/uploads.ts b/src/core/uploads.ts deleted file mode 100644 index 2882ca6..0000000 --- a/src/core/uploads.ts +++ /dev/null @@ -1,2 +0,0 @@ -export { type Uploadable } from '../internal/uploads'; -export { toFile, type ToFileInput } from '../internal/to-file'; diff --git a/src/error.ts b/src/error.ts index fc55f46..a3129f7 100644 --- a/src/error.ts +++ b/src/error.ts @@ -1,2 +1,130 @@ -/** @deprecated Import from ./core/error instead */ -export * from './core/error'; +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { castToError, Headers } from './core'; + +export class LlamaStackClientError extends Error {} + +export class APIError< + TStatus extends number | undefined = number | undefined, + THeaders extends Headers | undefined = Headers | undefined, + TError extends Object | undefined = Object | undefined, +> extends LlamaStackClientError { + /** HTTP status for the response that caused the error */ + readonly status: TStatus; + /** HTTP headers for the response that caused the error */ + readonly headers: THeaders; + /** JSON body of the response that caused the error */ + readonly error: TError; + + constructor(status: TStatus, error: TError, message: string | undefined, headers: THeaders) { + super(`${APIError.makeMessage(status, error, message)}`); + this.status = status; + this.headers = headers; + this.error = error; + } + + private static makeMessage(status: number | undefined, error: any, message: string | undefined) { + const msg = + error?.message ? + typeof error.message === 'string' ? + error.message + : JSON.stringify(error.message) + : error ? 
JSON.stringify(error) + : message; + + if (status && msg) { + return `${status} ${msg}`; + } + if (status) { + return `${status} status code (no body)`; + } + if (msg) { + return msg; + } + return '(no status code or body)'; + } + + static generate( + status: number | undefined, + errorResponse: Object | undefined, + message: string | undefined, + headers: Headers | undefined, + ): APIError { + if (!status || !headers) { + return new APIConnectionError({ message, cause: castToError(errorResponse) }); + } + + const error = errorResponse as Record; + + if (status === 400) { + return new BadRequestError(status, error, message, headers); + } + + if (status === 401) { + return new AuthenticationError(status, error, message, headers); + } + + if (status === 403) { + return new PermissionDeniedError(status, error, message, headers); + } + + if (status === 404) { + return new NotFoundError(status, error, message, headers); + } + + if (status === 409) { + return new ConflictError(status, error, message, headers); + } + + if (status === 422) { + return new UnprocessableEntityError(status, error, message, headers); + } + + if (status === 429) { + return new RateLimitError(status, error, message, headers); + } + + if (status >= 500) { + return new InternalServerError(status, error, message, headers); + } + + return new APIError(status, error, message, headers); + } +} + +export class APIUserAbortError extends APIError { + constructor({ message }: { message?: string } = {}) { + super(undefined, undefined, message || 'Request was aborted.', undefined); + } +} + +export class APIConnectionError extends APIError { + constructor({ message, cause }: { message?: string | undefined; cause?: Error | undefined }) { + super(undefined, undefined, message || 'Connection error.', undefined); + // in some environments the 'cause' property is already declared + // @ts-ignore + if (cause) this.cause = cause; + } +} + +export class APIConnectionTimeoutError extends APIConnectionError { + 
constructor({ message }: { message?: string } = {}) { + super({ message: message ?? 'Request timed out.' }); + } +} + +export class BadRequestError extends APIError<400, Headers> {} + +export class AuthenticationError extends APIError<401, Headers> {} + +export class PermissionDeniedError extends APIError<403, Headers> {} + +export class NotFoundError extends APIError<404, Headers> {} + +export class ConflictError extends APIError<409, Headers> {} + +export class UnprocessableEntityError extends APIError<422, Headers> {} + +export class RateLimitError extends APIError<429, Headers> {} + +export class InternalServerError extends APIError {} diff --git a/src/index.ts b/src/index.ts index c02fa42..37945eb 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,10 +1,700 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export { LlamaStackClient as default } from './client'; +import { type Agent } from './_shims/index'; +import * as qs from './internal/qs'; +import * as Core from './core'; +import * as Errors from './error'; +import * as Pagination from './pagination'; +import { type DatasetsIterrowsParams, DatasetsIterrowsResponse } from './pagination'; +import * as Uploads from './uploads'; +import * as API from './resources/index'; +import { + Benchmark, + BenchmarkListResponse, + BenchmarkRegisterParams, + Benchmarks, + ListBenchmarksResponse, +} from './resources/benchmarks'; +import { + CompletionCreateParams, + CompletionCreateParamsNonStreaming, + CompletionCreateParamsStreaming, + CompletionCreateResponse, + Completions, +} from './resources/completions'; +import { + DatasetAppendrowsParams, + DatasetIterrowsParams, + DatasetIterrowsResponse, + DatasetListResponse, + DatasetRegisterParams, + DatasetRegisterResponse, + DatasetRetrieveResponse, + Datasets, + ListDatasetsResponse, +} from './resources/datasets'; +import { CreateEmbeddingsResponse, EmbeddingCreateParams, Embeddings } from './resources/embeddings'; +import { + 
DeleteFileResponse, + File, + FileContentResponse, + FileCreateParams, + FileListParams, + Files, + ListFilesResponse, +} from './resources/files'; +import { + ChatCompletionResponseStreamChunk, + CompletionResponse, + EmbeddingsResponse, + Inference, + InferenceBatchChatCompletionParams, + InferenceBatchChatCompletionResponse, + InferenceBatchCompletionParams, + InferenceChatCompletionParams, + InferenceChatCompletionParamsNonStreaming, + InferenceChatCompletionParamsStreaming, + InferenceCompletionParams, + InferenceCompletionParamsNonStreaming, + InferenceCompletionParamsStreaming, + InferenceEmbeddingsParams, + TokenLogProbs, +} from './resources/inference'; +import { HealthInfo, Inspect, ProviderInfo, RouteInfo, VersionInfo } from './resources/inspect'; +import { + ListModelsResponse, + Model, + ModelListResponse, + ModelRegisterParams, + Models, +} from './resources/models'; +import { ListProvidersResponse, ProviderListResponse, Providers } from './resources/providers'; +import { ListRoutesResponse, RouteListResponse, Routes } from './resources/routes'; +import { RunShieldResponse, Safety, SafetyRunShieldParams } from './resources/safety'; +import { + Scoring, + ScoringScoreBatchParams, + ScoringScoreBatchResponse, + ScoringScoreParams, + ScoringScoreResponse, +} from './resources/scoring'; +import { + ListScoringFunctionsResponse, + ScoringFn, + ScoringFnParams, + ScoringFunctionListResponse, + ScoringFunctionRegisterParams, + ScoringFunctions, +} from './resources/scoring-functions'; +import { + ListShieldsResponse, + Shield, + ShieldListResponse, + ShieldRegisterParams, + Shields, +} from './resources/shields'; +import { + SyntheticDataGeneration, + SyntheticDataGenerationGenerateParams, + SyntheticDataGenerationResponse, +} from './resources/synthetic-data-generation'; +import { + Event, + QueryCondition, + QuerySpansResponse, + SpanWithStatus, + Telemetry, + TelemetryGetSpanResponse, + TelemetryGetSpanTreeParams, + TelemetryGetSpanTreeResponse, + 
TelemetryLogEventParams, + TelemetryQuerySpansParams, + TelemetryQuerySpansResponse, + TelemetryQueryTracesParams, + TelemetryQueryTracesResponse, + TelemetrySaveSpansToDatasetParams, + Trace, +} from './resources/telemetry'; +import { + ListToolGroupsResponse, + ToolGroup, + ToolgroupListResponse, + ToolgroupRegisterParams, + Toolgroups, +} from './resources/toolgroups'; +import { ListToolsResponse, Tool, ToolListParams, ToolListResponse, Tools } from './resources/tools'; +import { + ListVectorDBsResponse, + VectorDBListResponse, + VectorDBRegisterParams, + VectorDBRegisterResponse, + VectorDBRetrieveResponse, + VectorDBs, +} from './resources/vector-dbs'; +import { + QueryChunksResponse, + VectorIo, + VectorIoInsertParams, + VectorIoQueryParams, +} from './resources/vector-io'; +import { + AgentCreateParams, + AgentCreateResponse, + AgentListParams, + AgentListResponse, + AgentRetrieveResponse, + Agents, + InferenceStep, + MemoryRetrievalStep, + ShieldCallStep, + ToolExecutionStep, + ToolResponse, +} from './resources/agents/agents'; +import { Chat, ChatCompletionChunk } from './resources/chat/chat'; +import { + BenchmarkConfig, + Eval, + EvalCandidate, + EvalEvaluateRowsAlphaParams, + EvalEvaluateRowsParams, + EvalRunEvalAlphaParams, + EvalRunEvalParams, + EvaluateResponse, + Job, +} from './resources/eval/eval'; +import { + AlgorithmConfig, + ListPostTrainingJobsResponse, + PostTraining, + PostTrainingJob, + PostTrainingPreferenceOptimizeParams, + PostTrainingSupervisedFineTuneParams, +} from './resources/post-training/post-training'; +import { + ResponseCreateParams, + ResponseCreateParamsNonStreaming, + ResponseCreateParamsStreaming, + ResponseListParams, + ResponseListResponse, + ResponseObject, + ResponseObjectStream, + Responses, +} from './resources/responses/responses'; +import { + ToolDef, + ToolInvocationResult, + ToolRuntime, + ToolRuntimeInvokeToolParams, + ToolRuntimeListToolsParams, + ToolRuntimeListToolsResponse, +} from 
'./resources/tool-runtime/tool-runtime'; +import { + ListVectorStoresResponse, + VectorStore, + VectorStoreCreateParams, + VectorStoreDeleteResponse, + VectorStoreListParams, + VectorStoreSearchParams, + VectorStoreSearchResponse, + VectorStoreUpdateParams, + VectorStores, +} from './resources/vector-stores/vector-stores'; -export { type Uploadable, toFile } from './core/uploads'; -export { APIPromise } from './core/api-promise'; -export { LlamaStackClient, type ClientOptions } from './client'; +export interface ClientOptions { + /** + * Defaults to process.env['LLAMA_STACK_CLIENT_API_KEY']. + */ + apiKey?: string | null | undefined; + + /** + * Override the default base URL for the API, e.g., "https://api.example.com/v2/" + * + * Defaults to process.env['LLAMA_STACK_CLIENT_BASE_URL']. + */ + baseURL?: string | null | undefined; + + /** + * The maximum amount of time (in milliseconds) that the client should wait for a response + * from the server before timing out a single request. + * + * Note that request timeouts are retried by default, so in a worst-case scenario you may wait + * much longer than this timeout before the promise succeeds or fails. + */ + timeout?: number | undefined; + + /** + * An HTTP agent used to manage HTTP(S) connections. + * + * If not provided, an agent will be constructed by default in the Node.js environment, + * otherwise no agent is used. + */ + httpAgent?: Agent | undefined; + + /** + * Specify a custom `fetch` function implementation. + * + * If not provided, we use `node-fetch` on Node.js and otherwise expect that `fetch` is + * defined globally. + */ + fetch?: Core.Fetch | undefined; + + /** + * The maximum number of times that the client will retry a request in case of a + * temporary failure, like a network error or a 5XX error from the server. + * + * @default 2 + */ + maxRetries?: number | undefined; + + /** + * Default headers to include with every request to the API. 
+ * + * These can be removed in individual requests by explicitly setting the + * header to `undefined` or `null` in request options. + */ + defaultHeaders?: Core.Headers | undefined; + + /** + * Default query parameters to include with every request to the API. + * + * These can be removed in individual requests by explicitly setting the + * param to `undefined` in request options. + */ + defaultQuery?: Core.DefaultQuery | undefined; +} + +/** + * API Client for interfacing with the Llama Stack Client API. + */ +export class LlamaStackClient extends Core.APIClient { + apiKey: string | null; + + private _options: ClientOptions; + + /** + * API Client for interfacing with the Llama Stack Client API. + * + * @param {string | null | undefined} [opts.apiKey=process.env['LLAMA_STACK_CLIENT_API_KEY'] ?? null] + * @param {string} [opts.baseURL=process.env['LLAMA_STACK_CLIENT_BASE_URL'] ?? http://any-hosted-llama-stack.com] - Override the default base URL for the API. + * @param {number} [opts.timeout=1 minute] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out. + * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections. + * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation. + * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request. + * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API. + * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API. + */ + constructor({ + baseURL = Core.readEnv('LLAMA_STACK_CLIENT_BASE_URL'), + apiKey = Core.readEnv('LLAMA_STACK_CLIENT_API_KEY') ?? null, + ...opts + }: ClientOptions = {}) { + const options: ClientOptions = { + apiKey, + ...opts, + baseURL: baseURL || `http://any-hosted-llama-stack.com`, + }; + + super({ + baseURL: options.baseURL!, + baseURLOverridden: baseURL ? 
baseURL !== 'http://any-hosted-llama-stack.com' : false, + timeout: options.timeout ?? 60000 /* 1 minute */, + httpAgent: options.httpAgent, + maxRetries: options.maxRetries, + fetch: options.fetch, + }); + + this._options = options; + + this.apiKey = apiKey; + } + + toolgroups: API.Toolgroups = new API.Toolgroups(this); + tools: API.Tools = new API.Tools(this); + toolRuntime: API.ToolRuntime = new API.ToolRuntime(this); + responses: API.Responses = new API.Responses(this); + agents: API.Agents = new API.Agents(this); + datasets: API.Datasets = new API.Datasets(this); + eval: API.Eval = new API.Eval(this); + inspect: API.Inspect = new API.Inspect(this); + inference: API.Inference = new API.Inference(this); + embeddings: API.Embeddings = new API.Embeddings(this); + chat: API.Chat = new API.Chat(this); + completions: API.Completions = new API.Completions(this); + vectorIo: API.VectorIo = new API.VectorIo(this); + vectorDBs: API.VectorDBs = new API.VectorDBs(this); + vectorStores: API.VectorStores = new API.VectorStores(this); + models: API.Models = new API.Models(this); + postTraining: API.PostTraining = new API.PostTraining(this); + providers: API.Providers = new API.Providers(this); + routes: API.Routes = new API.Routes(this); + safety: API.Safety = new API.Safety(this); + shields: API.Shields = new API.Shields(this); + syntheticDataGeneration: API.SyntheticDataGeneration = new API.SyntheticDataGeneration(this); + telemetry: API.Telemetry = new API.Telemetry(this); + scoring: API.Scoring = new API.Scoring(this); + scoringFunctions: API.ScoringFunctions = new API.ScoringFunctions(this); + benchmarks: API.Benchmarks = new API.Benchmarks(this); + files: API.Files = new API.Files(this); + + /** + * Check whether the base URL is set to its default. 
+ */ + #baseURLOverridden(): boolean { + return this.baseURL !== 'http://any-hosted-llama-stack.com'; + } + + protected override defaultQuery(): Core.DefaultQuery | undefined { + return this._options.defaultQuery; + } + + protected override defaultHeaders(opts: Core.FinalRequestOptions): Core.Headers { + return { + ...super.defaultHeaders(opts), + ...this._options.defaultHeaders, + }; + } + + protected override authHeaders(opts: Core.FinalRequestOptions): Core.Headers { + if (this.apiKey == null) { + return {}; + } + return { Authorization: `Bearer ${this.apiKey}` }; + } + + protected override stringifyQuery(query: Record): string { + return qs.stringify(query, { arrayFormat: 'comma' }); + } + + static LlamaStackClient = this; + static DEFAULT_TIMEOUT = 60000; // 1 minute + + static LlamaStackClientError = Errors.LlamaStackClientError; + static APIError = Errors.APIError; + static APIConnectionError = Errors.APIConnectionError; + static APIConnectionTimeoutError = Errors.APIConnectionTimeoutError; + static APIUserAbortError = Errors.APIUserAbortError; + static NotFoundError = Errors.NotFoundError; + static ConflictError = Errors.ConflictError; + static RateLimitError = Errors.RateLimitError; + static BadRequestError = Errors.BadRequestError; + static AuthenticationError = Errors.AuthenticationError; + static InternalServerError = Errors.InternalServerError; + static PermissionDeniedError = Errors.PermissionDeniedError; + static UnprocessableEntityError = Errors.UnprocessableEntityError; + + static toFile = Uploads.toFile; + static fileFromPath = Uploads.fileFromPath; +} + +LlamaStackClient.Toolgroups = Toolgroups; +LlamaStackClient.Tools = Tools; +LlamaStackClient.ToolRuntime = ToolRuntime; +LlamaStackClient.Responses = Responses; +LlamaStackClient.Agents = Agents; +LlamaStackClient.Datasets = Datasets; +LlamaStackClient.Eval = Eval; +LlamaStackClient.Inspect = Inspect; +LlamaStackClient.Inference = Inference; +LlamaStackClient.Embeddings = Embeddings; 
+LlamaStackClient.Chat = Chat; +LlamaStackClient.Completions = Completions; +LlamaStackClient.VectorIo = VectorIo; +LlamaStackClient.VectorDBs = VectorDBs; +LlamaStackClient.VectorStores = VectorStores; +LlamaStackClient.Models = Models; +LlamaStackClient.PostTraining = PostTraining; +LlamaStackClient.Providers = Providers; +LlamaStackClient.Routes = Routes; +LlamaStackClient.Safety = Safety; +LlamaStackClient.Shields = Shields; +LlamaStackClient.SyntheticDataGeneration = SyntheticDataGeneration; +LlamaStackClient.Telemetry = Telemetry; +LlamaStackClient.Scoring = Scoring; +LlamaStackClient.ScoringFunctions = ScoringFunctions; +LlamaStackClient.Benchmarks = Benchmarks; +LlamaStackClient.Files = Files; +export declare namespace LlamaStackClient { + export type RequestOptions = Core.RequestOptions; + + export import DatasetsIterrows = Pagination.DatasetsIterrows; + export { + type DatasetsIterrowsParams as DatasetsIterrowsParams, + type DatasetsIterrowsResponse as DatasetsIterrowsResponse, + }; + + export { + Toolgroups as Toolgroups, + type ListToolGroupsResponse as ListToolGroupsResponse, + type ToolGroup as ToolGroup, + type ToolgroupListResponse as ToolgroupListResponse, + type ToolgroupRegisterParams as ToolgroupRegisterParams, + }; + + export { + Tools as Tools, + type ListToolsResponse as ListToolsResponse, + type Tool as Tool, + type ToolListResponse as ToolListResponse, + type ToolListParams as ToolListParams, + }; + + export { + ToolRuntime as ToolRuntime, + type ToolDef as ToolDef, + type ToolInvocationResult as ToolInvocationResult, + type ToolRuntimeListToolsResponse as ToolRuntimeListToolsResponse, + type ToolRuntimeInvokeToolParams as ToolRuntimeInvokeToolParams, + type ToolRuntimeListToolsParams as ToolRuntimeListToolsParams, + }; + + export { + Responses as Responses, + type ResponseObject as ResponseObject, + type ResponseObjectStream as ResponseObjectStream, + type ResponseListResponse as ResponseListResponse, + type ResponseCreateParams as 
ResponseCreateParams, + type ResponseCreateParamsNonStreaming as ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming as ResponseCreateParamsStreaming, + type ResponseListParams as ResponseListParams, + }; + + export { + Agents as Agents, + type InferenceStep as InferenceStep, + type MemoryRetrievalStep as MemoryRetrievalStep, + type ShieldCallStep as ShieldCallStep, + type ToolExecutionStep as ToolExecutionStep, + type ToolResponse as ToolResponse, + type AgentCreateResponse as AgentCreateResponse, + type AgentRetrieveResponse as AgentRetrieveResponse, + type AgentListResponse as AgentListResponse, + type AgentCreateParams as AgentCreateParams, + type AgentListParams as AgentListParams, + }; + + export { + Datasets as Datasets, + type ListDatasetsResponse as ListDatasetsResponse, + type DatasetRetrieveResponse as DatasetRetrieveResponse, + type DatasetListResponse as DatasetListResponse, + type DatasetIterrowsResponse as DatasetIterrowsResponse, + type DatasetRegisterResponse as DatasetRegisterResponse, + type DatasetAppendrowsParams as DatasetAppendrowsParams, + type DatasetIterrowsParams as DatasetIterrowsParams, + type DatasetRegisterParams as DatasetRegisterParams, + }; + + export { + Eval as Eval, + type BenchmarkConfig as BenchmarkConfig, + type EvalCandidate as EvalCandidate, + type EvaluateResponse as EvaluateResponse, + type Job as Job, + type EvalEvaluateRowsParams as EvalEvaluateRowsParams, + type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams, + type EvalRunEvalParams as EvalRunEvalParams, + type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams, + }; + + export { + Inspect as Inspect, + type HealthInfo as HealthInfo, + type ProviderInfo as ProviderInfo, + type RouteInfo as RouteInfo, + type VersionInfo as VersionInfo, + }; + + export { + Inference as Inference, + type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk, + type CompletionResponse as CompletionResponse, + type EmbeddingsResponse as 
EmbeddingsResponse, + type TokenLogProbs as TokenLogProbs, + type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse, + type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams, + type InferenceBatchCompletionParams as InferenceBatchCompletionParams, + type InferenceChatCompletionParams as InferenceChatCompletionParams, + type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming, + type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming, + type InferenceCompletionParams as InferenceCompletionParams, + type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming, + type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming, + type InferenceEmbeddingsParams as InferenceEmbeddingsParams, + }; + + export { + Embeddings as Embeddings, + type CreateEmbeddingsResponse as CreateEmbeddingsResponse, + type EmbeddingCreateParams as EmbeddingCreateParams, + }; + + export { Chat as Chat, type ChatCompletionChunk as ChatCompletionChunk }; + + export { + Completions as Completions, + type CompletionCreateResponse as CompletionCreateResponse, + type CompletionCreateParams as CompletionCreateParams, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; + + export { + VectorIo as VectorIo, + type QueryChunksResponse as QueryChunksResponse, + type VectorIoInsertParams as VectorIoInsertParams, + type VectorIoQueryParams as VectorIoQueryParams, + }; + + export { + VectorDBs as VectorDBs, + type ListVectorDBsResponse as ListVectorDBsResponse, + type VectorDBRetrieveResponse as VectorDBRetrieveResponse, + type VectorDBListResponse as VectorDBListResponse, + type VectorDBRegisterResponse as VectorDBRegisterResponse, + type VectorDBRegisterParams as VectorDBRegisterParams, + }; + + export { + VectorStores as VectorStores, + type 
ListVectorStoresResponse as ListVectorStoresResponse, + type VectorStore as VectorStore, + type VectorStoreDeleteResponse as VectorStoreDeleteResponse, + type VectorStoreSearchResponse as VectorStoreSearchResponse, + type VectorStoreCreateParams as VectorStoreCreateParams, + type VectorStoreUpdateParams as VectorStoreUpdateParams, + type VectorStoreListParams as VectorStoreListParams, + type VectorStoreSearchParams as VectorStoreSearchParams, + }; + + export { + Models as Models, + type ListModelsResponse as ListModelsResponse, + type Model as Model, + type ModelListResponse as ModelListResponse, + type ModelRegisterParams as ModelRegisterParams, + }; + + export { + PostTraining as PostTraining, + type AlgorithmConfig as AlgorithmConfig, + type ListPostTrainingJobsResponse as ListPostTrainingJobsResponse, + type PostTrainingJob as PostTrainingJob, + type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams, + type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams, + }; + + export { + Providers as Providers, + type ListProvidersResponse as ListProvidersResponse, + type ProviderListResponse as ProviderListResponse, + }; + + export { + Routes as Routes, + type ListRoutesResponse as ListRoutesResponse, + type RouteListResponse as RouteListResponse, + }; + + export { + Safety as Safety, + type RunShieldResponse as RunShieldResponse, + type SafetyRunShieldParams as SafetyRunShieldParams, + }; + + export { + Shields as Shields, + type ListShieldsResponse as ListShieldsResponse, + type Shield as Shield, + type ShieldListResponse as ShieldListResponse, + type ShieldRegisterParams as ShieldRegisterParams, + }; + + export { + SyntheticDataGeneration as SyntheticDataGeneration, + type SyntheticDataGenerationResponse as SyntheticDataGenerationResponse, + type SyntheticDataGenerationGenerateParams as SyntheticDataGenerationGenerateParams, + }; + + export { + Telemetry as Telemetry, + type Event as Event, + type QueryCondition as 
QueryCondition, + type QuerySpansResponse as QuerySpansResponse, + type SpanWithStatus as SpanWithStatus, + type Trace as Trace, + type TelemetryGetSpanResponse as TelemetryGetSpanResponse, + type TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse, + type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse, + type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse, + type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams, + type TelemetryLogEventParams as TelemetryLogEventParams, + type TelemetryQuerySpansParams as TelemetryQuerySpansParams, + type TelemetryQueryTracesParams as TelemetryQueryTracesParams, + type TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams, + }; + + export { + Scoring as Scoring, + type ScoringScoreResponse as ScoringScoreResponse, + type ScoringScoreBatchResponse as ScoringScoreBatchResponse, + type ScoringScoreParams as ScoringScoreParams, + type ScoringScoreBatchParams as ScoringScoreBatchParams, + }; + + export { + ScoringFunctions as ScoringFunctions, + type ListScoringFunctionsResponse as ListScoringFunctionsResponse, + type ScoringFn as ScoringFn, + type ScoringFnParams as ScoringFnParams, + type ScoringFunctionListResponse as ScoringFunctionListResponse, + type ScoringFunctionRegisterParams as ScoringFunctionRegisterParams, + }; + + export { + Benchmarks as Benchmarks, + type Benchmark as Benchmark, + type ListBenchmarksResponse as ListBenchmarksResponse, + type BenchmarkListResponse as BenchmarkListResponse, + type BenchmarkRegisterParams as BenchmarkRegisterParams, + }; + + export { + Files as Files, + type DeleteFileResponse as DeleteFileResponse, + type File as File, + type ListFilesResponse as ListFilesResponse, + type FileContentResponse as FileContentResponse, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; + + export type AgentConfig = API.AgentConfig; + export type BatchCompletion = API.BatchCompletion; + export type 
ChatCompletionResponse = API.ChatCompletionResponse; + export type CompletionMessage = API.CompletionMessage; + export type ContentDelta = API.ContentDelta; + export type Document = API.Document; + export type InterleavedContent = API.InterleavedContent; + export type InterleavedContentItem = API.InterleavedContentItem; + export type Message = API.Message; + export type ParamType = API.ParamType; + export type QueryConfig = API.QueryConfig; + export type QueryGeneratorConfig = API.QueryGeneratorConfig; + export type QueryResult = API.QueryResult; + export type ResponseFormat = API.ResponseFormat; + export type ReturnType = API.ReturnType; + export type SafetyViolation = API.SafetyViolation; + export type SamplingParams = API.SamplingParams; + export type ScoringResult = API.ScoringResult; + export type SystemMessage = API.SystemMessage; + export type ToolCall = API.ToolCall; + export type ToolCallOrString = API.ToolCallOrString; + export type ToolParamDefinition = API.ToolParamDefinition; + export type ToolResponseMessage = API.ToolResponseMessage; + export type UserMessage = API.UserMessage; +} + +export { toFile, fileFromPath } from './uploads'; export { LlamaStackClientError, APIError, @@ -19,4 +709,6 @@ export { InternalServerError, PermissionDeniedError, UnprocessableEntityError, -} from './core/error'; +} from './error'; + +export default LlamaStackClient; diff --git a/src/internal/README.md b/src/internal/README.md deleted file mode 100644 index 3ef5a25..0000000 --- a/src/internal/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `internal` - -The modules in this directory are not importable outside this package and will change between releases. diff --git a/src/internal/builtin-types.ts b/src/internal/builtin-types.ts deleted file mode 100644 index c23d3bd..0000000 --- a/src/internal/builtin-types.ts +++ /dev/null @@ -1,93 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -export type Fetch = (input: string | URL | Request, init?: RequestInit) => Promise; - -/** - * An alias to the builtin `RequestInit` type so we can - * easily alias it in import statements if there are name clashes. - * - * https://developer.mozilla.org/docs/Web/API/RequestInit - */ -type _RequestInit = RequestInit; - -/** - * An alias to the builtin `Response` type so we can - * easily alias it in import statements if there are name clashes. - * - * https://developer.mozilla.org/docs/Web/API/Response - */ -type _Response = Response; - -/** - * The type for the first argument to `fetch`. - * - * https://developer.mozilla.org/docs/Web/API/Window/fetch#resource - */ -type _RequestInfo = Request | URL | string; - -/** - * The type for constructing `RequestInit` Headers. - * - * https://developer.mozilla.org/docs/Web/API/RequestInit#setting_headers - */ -type _HeadersInit = RequestInit['headers']; - -/** - * The type for constructing `RequestInit` body. - * - * https://developer.mozilla.org/docs/Web/API/RequestInit#body - */ -type _BodyInit = RequestInit['body']; - -/** - * An alias to the builtin `Array` type so we can - * easily alias it in import statements if there are name clashes. - */ -type _Array = Array; - -/** - * An alias to the builtin `Record` type so we can - * easily alias it in import statements if there are name clashes. - */ -type _Record = Record; - -export type { - _Array as Array, - _BodyInit as BodyInit, - _HeadersInit as HeadersInit, - _Record as Record, - _RequestInfo as RequestInfo, - _RequestInit as RequestInit, - _Response as Response, -}; - -/** - * A copy of the builtin `EndingType` type as it isn't fully supported in certain - * environments and attempting to reference the global version will error. 
- * - * https://github.com/microsoft/TypeScript/blob/49ad1a3917a0ea57f5ff248159256e12bb1cb705/src/lib/dom.generated.d.ts#L27941 - */ -type EndingType = 'native' | 'transparent'; - -/** - * A copy of the builtin `BlobPropertyBag` type as it isn't fully supported in certain - * environments and attempting to reference the global version will error. - * - * https://github.com/microsoft/TypeScript/blob/49ad1a3917a0ea57f5ff248159256e12bb1cb705/src/lib/dom.generated.d.ts#L154 - * https://developer.mozilla.org/en-US/docs/Web/API/Blob/Blob#options - */ -export interface BlobPropertyBag { - endings?: EndingType; - type?: string; -} - -/** - * A copy of the builtin `FilePropertyBag` type as it isn't fully supported in certain - * environments and attempting to reference the global version will error. - * - * https://github.com/microsoft/TypeScript/blob/49ad1a3917a0ea57f5ff248159256e12bb1cb705/src/lib/dom.generated.d.ts#L503 - * https://developer.mozilla.org/en-US/docs/Web/API/File/File#options - */ -export interface FilePropertyBag extends BlobPropertyBag { - lastModified?: number; -} diff --git a/src/internal/decoders/line.ts b/src/internal/decoders/line.ts new file mode 100644 index 0000000..b5356e0 --- /dev/null +++ b/src/internal/decoders/line.ts @@ -0,0 +1,176 @@ +import { LlamaStackClientError } from '../../error'; + +export type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; + +/** + * A re-implementation of httpx's `LineDecoder` in Python that handles incrementally + * reading lines from text. + * + * https://github.com/encode/httpx/blob/920333ea98118e9cf617f246905d7b202510941c/httpx/_decoders.py#L258 + */ +export class LineDecoder { + // prettier-ignore + static NEWLINE_CHARS = new Set(['\n', '\r']); + static NEWLINE_REGEXP = /\r\n|[\n\r]/g; + + buffer: Uint8Array; + #carriageReturnIndex: number | null; + textDecoder: any; // TextDecoder found in browsers; not typed to avoid pulling in either "dom" or "node" types. 
+ + constructor() { + this.buffer = new Uint8Array(); + this.#carriageReturnIndex = null; + } + + decode(chunk: Bytes): string[] { + if (chunk == null) { + return []; + } + + const binaryChunk = + chunk instanceof ArrayBuffer ? new Uint8Array(chunk) + : typeof chunk === 'string' ? new TextEncoder().encode(chunk) + : chunk; + + let newData = new Uint8Array(this.buffer.length + binaryChunk.length); + newData.set(this.buffer); + newData.set(binaryChunk, this.buffer.length); + this.buffer = newData; + + const lines: string[] = []; + let patternIndex; + while ((patternIndex = findNewlineIndex(this.buffer, this.#carriageReturnIndex)) != null) { + if (patternIndex.carriage && this.#carriageReturnIndex == null) { + // skip until we either get a corresponding `\n`, a new `\r` or nothing + this.#carriageReturnIndex = patternIndex.index; + continue; + } + + // we got double \r or \rtext\n + if ( + this.#carriageReturnIndex != null && + (patternIndex.index !== this.#carriageReturnIndex + 1 || patternIndex.carriage) + ) { + lines.push(this.decodeText(this.buffer.slice(0, this.#carriageReturnIndex - 1))); + this.buffer = this.buffer.slice(this.#carriageReturnIndex); + this.#carriageReturnIndex = null; + continue; + } + + const endIndex = + this.#carriageReturnIndex !== null ? 
patternIndex.preceding - 1 : patternIndex.preceding; + + const line = this.decodeText(this.buffer.slice(0, endIndex)); + lines.push(line); + + this.buffer = this.buffer.slice(patternIndex.index); + this.#carriageReturnIndex = null; + } + + return lines; + } + + decodeText(bytes: Bytes): string { + if (bytes == null) return ''; + if (typeof bytes === 'string') return bytes; + + // Node: + if (typeof Buffer !== 'undefined') { + if (bytes instanceof Buffer) { + return bytes.toString(); + } + if (bytes instanceof Uint8Array) { + return Buffer.from(bytes).toString(); + } + + throw new LlamaStackClientError( + `Unexpected: received non-Uint8Array (${bytes.constructor.name}) stream chunk in an environment with a global "Buffer" defined, which this library assumes to be Node. Please report this error.`, + ); + } + + // Browser + if (typeof TextDecoder !== 'undefined') { + if (bytes instanceof Uint8Array || bytes instanceof ArrayBuffer) { + this.textDecoder ??= new TextDecoder('utf8'); + return this.textDecoder.decode(bytes); + } + + throw new LlamaStackClientError( + `Unexpected: received non-Uint8Array/ArrayBuffer (${ + (bytes as any).constructor.name + }) in a web platform. Please report this error.`, + ); + } + + throw new LlamaStackClientError( + `Unexpected: neither Buffer nor TextDecoder are available as globals. Please report this error.`, + ); + } + + flush(): string[] { + if (!this.buffer.length) { + return []; + } + return this.decode('\n'); + } +} + +/** + * This function searches the buffer for the end patterns, (\r or \n) + * and returns an object with the index preceding the matched newline and the + * index after the newline char. `null` is returned if no new line is found. 
+ * + * ```ts + * findNewLineIndex('abc\ndef') -> { preceding: 2, index: 3 } + * ``` + */ +function findNewlineIndex( + buffer: Uint8Array, + startIndex: number | null, +): { preceding: number; index: number; carriage: boolean } | null { + const newline = 0x0a; // \n + const carriage = 0x0d; // \r + + for (let i = startIndex ?? 0; i < buffer.length; i++) { + if (buffer[i] === newline) { + return { preceding: i, index: i + 1, carriage: false }; + } + + if (buffer[i] === carriage) { + return { preceding: i, index: i + 1, carriage: true }; + } + } + + return null; +} + +export function findDoubleNewlineIndex(buffer: Uint8Array): number { + // This function searches the buffer for the end patterns (\r\r, \n\n, \r\n\r\n) + // and returns the index right after the first occurrence of any pattern, + // or -1 if none of the patterns are found. + const newline = 0x0a; // \n + const carriage = 0x0d; // \r + + for (let i = 0; i < buffer.length - 1; i++) { + if (buffer[i] === newline && buffer[i + 1] === newline) { + // \n\n + return i + 2; + } + if (buffer[i] === carriage && buffer[i + 1] === carriage) { + // \r\r + return i + 2; + } + if ( + buffer[i] === carriage && + buffer[i + 1] === newline && + i + 3 < buffer.length && + buffer[i + 2] === carriage && + buffer[i + 3] === newline + ) { + // \r\n\r\n + return i + 4; + } + } + + return -1; +} diff --git a/src/internal/detect-platform.ts b/src/internal/detect-platform.ts deleted file mode 100644 index e82d95c..0000000 --- a/src/internal/detect-platform.ts +++ /dev/null @@ -1,196 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { VERSION } from '../version'; - -export const isRunningInBrowser = () => { - return ( - // @ts-ignore - typeof window !== 'undefined' && - // @ts-ignore - typeof window.document !== 'undefined' && - // @ts-ignore - typeof navigator !== 'undefined' - ); -}; - -type DetectedPlatform = 'deno' | 'node' | 'edge' | 'unknown'; - -/** - * Note this does not detect 'browser'; for that, use getBrowserInfo(). - */ -function getDetectedPlatform(): DetectedPlatform { - if (typeof Deno !== 'undefined' && Deno.build != null) { - return 'deno'; - } - if (typeof EdgeRuntime !== 'undefined') { - return 'edge'; - } - if ( - Object.prototype.toString.call( - typeof (globalThis as any).process !== 'undefined' ? (globalThis as any).process : 0, - ) === '[object process]' - ) { - return 'node'; - } - return 'unknown'; -} - -declare const Deno: any; -declare const EdgeRuntime: any; -type Arch = 'x32' | 'x64' | 'arm' | 'arm64' | `other:${string}` | 'unknown'; -type PlatformName = - | 'MacOS' - | 'Linux' - | 'Windows' - | 'FreeBSD' - | 'OpenBSD' - | 'iOS' - | 'Android' - | `Other:${string}` - | 'Unknown'; -type Browser = 'ie' | 'edge' | 'chrome' | 'firefox' | 'safari'; -type PlatformProperties = { - 'X-Stainless-Lang': 'js'; - 'X-Stainless-Package-Version': string; - 'X-Stainless-OS': PlatformName; - 'X-Stainless-Arch': Arch; - 'X-Stainless-Runtime': 'node' | 'deno' | 'edge' | `browser:${Browser}` | 'unknown'; - 'X-Stainless-Runtime-Version': string; -}; -const getPlatformProperties = (): PlatformProperties => { - const detectedPlatform = getDetectedPlatform(); - if (detectedPlatform === 'deno') { - return { - 'X-Stainless-Lang': 'js', - 'X-Stainless-Package-Version': VERSION, - 'X-Stainless-OS': normalizePlatform(Deno.build.os), - 'X-Stainless-Arch': normalizeArch(Deno.build.arch), - 'X-Stainless-Runtime': 'deno', - 'X-Stainless-Runtime-Version': - typeof Deno.version === 'string' ? Deno.version : Deno.version?.deno ?? 
'unknown', - }; - } - if (typeof EdgeRuntime !== 'undefined') { - return { - 'X-Stainless-Lang': 'js', - 'X-Stainless-Package-Version': VERSION, - 'X-Stainless-OS': 'Unknown', - 'X-Stainless-Arch': `other:${EdgeRuntime}`, - 'X-Stainless-Runtime': 'edge', - 'X-Stainless-Runtime-Version': (globalThis as any).process.version, - }; - } - // Check if Node.js - if (detectedPlatform === 'node') { - return { - 'X-Stainless-Lang': 'js', - 'X-Stainless-Package-Version': VERSION, - 'X-Stainless-OS': normalizePlatform((globalThis as any).process.platform ?? 'unknown'), - 'X-Stainless-Arch': normalizeArch((globalThis as any).process.arch ?? 'unknown'), - 'X-Stainless-Runtime': 'node', - 'X-Stainless-Runtime-Version': (globalThis as any).process.version ?? 'unknown', - }; - } - - const browserInfo = getBrowserInfo(); - if (browserInfo) { - return { - 'X-Stainless-Lang': 'js', - 'X-Stainless-Package-Version': VERSION, - 'X-Stainless-OS': 'Unknown', - 'X-Stainless-Arch': 'unknown', - 'X-Stainless-Runtime': `browser:${browserInfo.browser}`, - 'X-Stainless-Runtime-Version': browserInfo.version, - }; - } - - // TODO add support for Cloudflare workers, etc. - return { - 'X-Stainless-Lang': 'js', - 'X-Stainless-Package-Version': VERSION, - 'X-Stainless-OS': 'Unknown', - 'X-Stainless-Arch': 'unknown', - 'X-Stainless-Runtime': 'unknown', - 'X-Stainless-Runtime-Version': 'unknown', - }; -}; - -type BrowserInfo = { - browser: Browser; - version: string; -}; - -declare const navigator: { userAgent: string } | undefined; - -// Note: modified from https://github.com/JS-DevTools/host-environment/blob/b1ab79ecde37db5d6e163c050e54fe7d287d7c92/src/isomorphic.browser.ts -function getBrowserInfo(): BrowserInfo | null { - if (typeof navigator === 'undefined' || !navigator) { - return null; - } - - // NOTE: The order matters here! 
- const browserPatterns = [ - { key: 'edge' as const, pattern: /Edge(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, - { key: 'ie' as const, pattern: /MSIE(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, - { key: 'ie' as const, pattern: /Trident(?:.*rv\:(\d+)\.(\d+)(?:\.(\d+))?)?/ }, - { key: 'chrome' as const, pattern: /Chrome(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, - { key: 'firefox' as const, pattern: /Firefox(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, - { key: 'safari' as const, pattern: /(?:Version\W+(\d+)\.(\d+)(?:\.(\d+))?)?(?:\W+Mobile\S*)?\W+Safari/ }, - ]; - - // Find the FIRST matching browser - for (const { key, pattern } of browserPatterns) { - const match = pattern.exec(navigator.userAgent); - if (match) { - const major = match[1] || 0; - const minor = match[2] || 0; - const patch = match[3] || 0; - - return { browser: key, version: `${major}.${minor}.${patch}` }; - } - } - - return null; -} - -const normalizeArch = (arch: string): Arch => { - // Node docs: - // - https://nodejs.org/api/process.html#processarch - // Deno docs: - // - https://doc.deno.land/deno/stable/~/Deno.build - if (arch === 'x32') return 'x32'; - if (arch === 'x86_64' || arch === 'x64') return 'x64'; - if (arch === 'arm') return 'arm'; - if (arch === 'aarch64' || arch === 'arm64') return 'arm64'; - if (arch) return `other:${arch}`; - return 'unknown'; -}; - -const normalizePlatform = (platform: string): PlatformName => { - // Node platforms: - // - https://nodejs.org/api/process.html#processplatform - // Deno platforms: - // - https://doc.deno.land/deno/stable/~/Deno.build - // - https://github.com/denoland/deno/issues/14799 - - platform = platform.toLowerCase(); - - // NOTE: this iOS check is untested and may not work - // Node does not work natively on IOS, there is a fork at - // https://github.com/nodejs-mobile/nodejs-mobile - // however it is unknown at the time of writing how to detect if it is running - if (platform.includes('ios')) return 'iOS'; - if (platform === 'android') return 'Android'; - if (platform 
=== 'darwin') return 'MacOS'; - if (platform === 'win32') return 'Windows'; - if (platform === 'freebsd') return 'FreeBSD'; - if (platform === 'openbsd') return 'OpenBSD'; - if (platform === 'linux') return 'Linux'; - if (platform) return `Other:${platform}`; - return 'Unknown'; -}; - -let _platformHeaders: PlatformProperties; -export const getPlatformHeaders = () => { - return (_platformHeaders ??= getPlatformProperties()); -}; diff --git a/src/internal/errors.ts b/src/internal/errors.ts deleted file mode 100644 index 82c7b14..0000000 --- a/src/internal/errors.ts +++ /dev/null @@ -1,33 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export function isAbortError(err: unknown) { - return ( - typeof err === 'object' && - err !== null && - // Spec-compliant fetch implementations - (('name' in err && (err as any).name === 'AbortError') || - // Expo fetch - ('message' in err && String((err as any).message).includes('FetchRequestCanceledException'))) - ); -} - -export const castToError = (err: any): Error => { - if (err instanceof Error) return err; - if (typeof err === 'object' && err !== null) { - try { - if (Object.prototype.toString.call(err) === '[object Error]') { - // @ts-ignore - not all envs have native support for cause yet - const error = new Error(err.message, err.cause ? { cause: err.cause } : {}); - if (err.stack) error.stack = err.stack; - // @ts-ignore - not all envs have native support for cause yet - if (err.cause && !error.cause) error.cause = err.cause; - if (err.name) error.name = err.name; - return error; - } - } catch {} - try { - return new Error(JSON.stringify(err)); - } catch {} - } - return new Error(err); -}; diff --git a/src/internal/headers.ts b/src/internal/headers.ts deleted file mode 100644 index c724a9d..0000000 --- a/src/internal/headers.ts +++ /dev/null @@ -1,97 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { isReadonlyArray } from './utils/values'; - -type HeaderValue = string | undefined | null; -export type HeadersLike = - | Headers - | readonly HeaderValue[][] - | Record - | undefined - | null - | NullableHeaders; - -const brand_privateNullableHeaders = /* @__PURE__ */ Symbol('brand.privateNullableHeaders'); - -/** - * @internal - * Users can pass explicit nulls to unset default headers. When we parse them - * into a standard headers type we need to preserve that information. - */ -export type NullableHeaders = { - /** Brand check, prevent users from creating a NullableHeaders. */ - [brand_privateNullableHeaders]: true; - /** Parsed headers. */ - values: Headers; - /** Set of lowercase header names explicitly set to null. */ - nulls: Set; -}; - -function* iterateHeaders(headers: HeadersLike): IterableIterator { - if (!headers) return; - - if (brand_privateNullableHeaders in headers) { - const { values, nulls } = headers; - yield* values.entries(); - for (const name of nulls) { - yield [name, null]; - } - return; - } - - let shouldClear = false; - let iter: Iterable; - if (headers instanceof Headers) { - iter = headers.entries(); - } else if (isReadonlyArray(headers)) { - iter = headers; - } else { - shouldClear = true; - iter = Object.entries(headers ?? {}); - } - for (let row of iter) { - const name = row[0]; - if (typeof name !== 'string') throw new TypeError('expected header name to be a string'); - const values = isReadonlyArray(row[1]) ? row[1] : [row[1]]; - let didClear = false; - for (const value of values) { - if (value === undefined) continue; - - // Objects keys always overwrite older headers, they never append. - // Yield a null to clear the header before adding the new values. 
- if (shouldClear && !didClear) { - didClear = true; - yield [name, null]; - } - yield [name, value]; - } - } -} - -export const buildHeaders = (newHeaders: HeadersLike[]): NullableHeaders => { - const targetHeaders = new Headers(); - const nullHeaders = new Set(); - for (const headers of newHeaders) { - const seenHeaders = new Set(); - for (const [name, value] of iterateHeaders(headers)) { - const lowerName = name.toLowerCase(); - if (!seenHeaders.has(lowerName)) { - targetHeaders.delete(name); - seenHeaders.add(lowerName); - } - if (value === null) { - targetHeaders.delete(name); - nullHeaders.add(lowerName); - } else { - targetHeaders.append(name, value); - nullHeaders.delete(lowerName); - } - } - } - return { [brand_privateNullableHeaders]: true, values: targetHeaders, nulls: nullHeaders }; -}; - -export const isEmptyHeaders = (headers: HeadersLike) => { - for (const _ of iterateHeaders(headers)) return false; - return true; -}; diff --git a/src/internal/parse.ts b/src/internal/parse.ts deleted file mode 100644 index d1a8658..0000000 --- a/src/internal/parse.ts +++ /dev/null @@ -1,50 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import type { FinalRequestOptions } from './request-options'; -import { type LlamaStackClient } from '../client'; -import { formatRequestDetails, loggerFor } from './utils/log'; - -export type APIResponseProps = { - response: Response; - options: FinalRequestOptions; - controller: AbortController; - requestLogID: string; - retryOfRequestLogID: string | undefined; - startTime: number; -}; - -export async function defaultParseResponse(client: LlamaStackClient, props: APIResponseProps): Promise { - const { response, requestLogID, retryOfRequestLogID, startTime } = props; - const body = await (async () => { - // fetch refuses to read the body when the status code is 204. 
- if (response.status === 204) { - return null as T; - } - - if (props.options.__binaryResponse) { - return response as unknown as T; - } - - const contentType = response.headers.get('content-type'); - const mediaType = contentType?.split(';')[0]?.trim(); - const isJSON = mediaType?.includes('application/json') || mediaType?.endsWith('+json'); - if (isJSON) { - const json = await response.json(); - return json as T; - } - - const text = await response.text(); - return text as unknown as T; - })(); - loggerFor(client).debug( - `[${requestLogID}] response parsed`, - formatRequestDetails({ - retryOfRequestLogID, - url: response.url, - status: response.status, - body, - durationMs: Date.now() - startTime, - }), - ); - return body; -} diff --git a/src/internal/qs/formats.ts b/src/internal/qs/formats.ts index e76a742..1cf9e2c 100644 --- a/src/internal/qs/formats.ts +++ b/src/internal/qs/formats.ts @@ -1,10 +1,9 @@ import type { Format } from './types'; export const default_format: Format = 'RFC3986'; -export const default_formatter = (v: PropertyKey) => String(v); export const formatters: Record string> = { RFC1738: (v: PropertyKey) => String(v).replace(/%20/g, '+'), - RFC3986: default_formatter, + RFC3986: (v: PropertyKey) => String(v), }; export const RFC1738 = 'RFC1738'; export const RFC3986 = 'RFC3986'; diff --git a/src/internal/qs/stringify.ts b/src/internal/qs/stringify.ts index 7e71387..6749756 100644 --- a/src/internal/qs/stringify.ts +++ b/src/internal/qs/stringify.ts @@ -1,7 +1,8 @@ -import { encode, is_buffer, maybe_map, has } from './utils'; -import { default_format, default_formatter, formatters } from './formats'; +import { encode, is_buffer, maybe_map } from './utils'; +import { default_format, formatters } from './formats'; import type { NonNullableProperties, StringifyOptions } from './types'; -import { isArray } from '../utils/values'; + +const has = Object.prototype.hasOwnProperty; const array_prefix_generators = { brackets(prefix: PropertyKey) { @@ 
-16,11 +17,13 @@ const array_prefix_generators = { }, }; +const is_array = Array.isArray; +const push = Array.prototype.push; const push_to_array = function (arr: any[], value_or_array: any) { - Array.prototype.push.apply(arr, isArray(value_or_array) ? value_or_array : [value_or_array]); + push.apply(arr, is_array(value_or_array) ? value_or_array : [value_or_array]); }; -let toISOString; +const to_ISO = Date.prototype.toISOString; const defaults = { addQueryPrefix: false, @@ -35,11 +38,11 @@ const defaults = { encoder: encode, encodeValuesOnly: false, format: default_format, - formatter: default_formatter, + formatter: formatters[default_format], /** @deprecated */ indices: false, serializeDate(date) { - return (toISOString ??= Function.prototype.call.bind(Date.prototype.toISOString))(date); + return to_ISO.call(date); }, skipNulls: false, strictNullHandling: false, @@ -102,7 +105,7 @@ function inner_stringify( obj = filter(prefix, obj); } else if (obj instanceof Date) { obj = serializeDate?.(obj); - } else if (generateArrayPrefix === 'comma' && isArray(obj)) { + } else if (generateArrayPrefix === 'comma' && is_array(obj)) { obj = maybe_map(obj, function (value) { if (value instanceof Date) { return serializeDate?.(value); @@ -145,14 +148,14 @@ function inner_stringify( } let obj_keys; - if (generateArrayPrefix === 'comma' && isArray(obj)) { + if (generateArrayPrefix === 'comma' && is_array(obj)) { // we need to join elements in if (encodeValuesOnly && encoder) { // @ts-expect-error values only obj = maybe_map(obj, encoder); } obj_keys = [{ value: obj.length > 0 ? obj.join(',') || null : void undefined }]; - } else if (isArray(filter)) { + } else if (is_array(filter)) { obj_keys = filter; } else { const keys = Object.keys(obj); @@ -162,9 +165,9 @@ function inner_stringify( const encoded_prefix = encodeDotInKeys ? String(prefix).replace(/\./g, '%2E') : String(prefix); const adjusted_prefix = - commaRoundTrip && isArray(obj) && obj.length === 1 ? 
encoded_prefix + '[]' : encoded_prefix; + commaRoundTrip && is_array(obj) && obj.length === 1 ? encoded_prefix + '[]' : encoded_prefix; - if (allowEmptyArrays && isArray(obj) && obj.length === 0) { + if (allowEmptyArrays && is_array(obj) && obj.length === 0) { return adjusted_prefix + '[]'; } @@ -181,7 +184,7 @@ function inner_stringify( // @ts-ignore const encoded_key = allowDots && encodeDotInKeys ? (key as any).replace(/\./g, '%2E') : key; const key_prefix = - isArray(obj) ? + is_array(obj) ? typeof generateArrayPrefix === 'function' ? generateArrayPrefix(adjusted_prefix, encoded_key) : adjusted_prefix @@ -202,7 +205,7 @@ function inner_stringify( skipNulls, encodeDotInKeys, // @ts-ignore - generateArrayPrefix === 'comma' && encodeValuesOnly && isArray(obj) ? null : encoder, + generateArrayPrefix === 'comma' && encodeValuesOnly && is_array(obj) ? null : encoder, filter, sort, allowDots, @@ -241,7 +244,7 @@ function normalize_stringify_options( let format = default_format; if (typeof opts.format !== 'undefined') { - if (!has(formatters, opts.format)) { + if (!has.call(formatters, opts.format)) { throw new TypeError('Unknown format option provided.'); } format = opts.format; @@ -249,7 +252,7 @@ function normalize_stringify_options( const formatter = formatters[format]; let filter = defaults.filter; - if (typeof opts.filter === 'function' || isArray(opts.filter)) { + if (typeof opts.filter === 'function' || is_array(opts.filter)) { filter = opts.filter; } @@ -313,7 +316,7 @@ export function stringify(object: any, opts: StringifyOptions = {}) { if (typeof options.filter === 'function') { filter = options.filter; obj = filter('', obj); - } else if (isArray(options.filter)) { + } else if (is_array(options.filter)) { filter = options.filter; obj_keys = filter; } diff --git a/src/internal/qs/utils.ts b/src/internal/qs/utils.ts index 4cd5657..113b18f 100644 --- a/src/internal/qs/utils.ts +++ b/src/internal/qs/utils.ts @@ -1,13 +1,10 @@ import { RFC1738 } from 
'./formats'; import type { DefaultEncoder, Format } from './types'; -import { isArray } from '../utils/values'; -export let has = (obj: object, key: PropertyKey): boolean => ( - (has = (Object as any).hasOwn ?? Function.prototype.call.bind(Object.prototype.hasOwnProperty)), - has(obj, key) -); +const has = Object.prototype.hasOwnProperty; +const is_array = Array.isArray; -const hex_table = /* @__PURE__ */ (() => { +const hex_table = (() => { const array = []; for (let i = 0; i < 256; ++i) { array.push('%' + ((i < 16 ? '0' : '') + i.toString(16)).toUpperCase()); @@ -23,7 +20,7 @@ function compact_queue>(queue: Array<{ obj: T; pro const obj = item.obj[item.prop]; - if (isArray(obj)) { + if (is_array(obj)) { const compacted: unknown[] = []; for (let j = 0; j < obj.length; ++j) { @@ -59,10 +56,13 @@ export function merge( } if (typeof source !== 'object') { - if (isArray(target)) { + if (is_array(target)) { target.push(source); } else if (target && typeof target === 'object') { - if ((options && (options.plainObjects || options.allowPrototypes)) || !has(Object.prototype, source)) { + if ( + (options && (options.plainObjects || options.allowPrototypes)) || + !has.call(Object.prototype, source) + ) { target[source] = true; } } else { @@ -77,14 +77,14 @@ export function merge( } let mergeTarget = target; - if (isArray(target) && !isArray(source)) { + if (is_array(target) && !is_array(source)) { // @ts-ignore mergeTarget = array_to_object(target, options); } - if (isArray(target) && isArray(source)) { + if (is_array(target) && is_array(source)) { source.forEach(function (item, i) { - if (has(target, i)) { + if (has.call(target, i)) { const targetItem = target[i]; if (targetItem && typeof targetItem === 'object' && item && typeof item === 'object') { target[i] = merge(targetItem, item, options); @@ -101,7 +101,7 @@ export function merge( return Object.keys(source).reduce(function (acc, key) { const value = source[key]; - if (has(acc, key)) { + if (has.call(acc, key)) { 
acc[key] = merge(acc[key], value, options); } else { acc[key] = value; @@ -254,7 +254,7 @@ export function combine(a: any, b: any) { } export function maybe_map(val: T[], fn: (v: T) => T) { - if (isArray(val)) { + if (is_array(val)) { const mapped = []; for (let i = 0; i < val.length; i += 1) { mapped.push(fn(val[i]!)); diff --git a/src/internal/request-options.ts b/src/internal/request-options.ts deleted file mode 100644 index 7de032f..0000000 --- a/src/internal/request-options.ts +++ /dev/null @@ -1,38 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { NullableHeaders } from './headers'; - -import type { BodyInit } from './builtin-types'; -import type { HTTPMethod, MergedRequestInit } from './types'; -import { type HeadersLike } from './headers'; - -export type FinalRequestOptions = RequestOptions & { method: HTTPMethod; path: string }; - -export type RequestOptions = { - method?: HTTPMethod; - path?: string; - query?: object | undefined | null; - body?: unknown; - headers?: HeadersLike; - maxRetries?: number; - stream?: boolean | undefined; - timeout?: number; - fetchOptions?: MergedRequestInit; - signal?: AbortSignal | undefined | null; - idempotencyKey?: string; - defaultBaseURL?: string | undefined; - - __binaryResponse?: boolean | undefined; -}; - -export type EncodedContent = { bodyHeaders: HeadersLike; body: BodyInit }; -export type RequestEncoder = (request: { headers: NullableHeaders; body: unknown }) => EncodedContent; - -export const FallbackEncoder: RequestEncoder = ({ headers, body }) => { - return { - bodyHeaders: { - 'content-type': 'application/json', - }, - body: JSON.stringify(body), - }; -}; diff --git a/src/internal/shim-types.ts b/src/internal/shim-types.ts deleted file mode 100644 index 8ddf7b0..0000000 --- a/src/internal/shim-types.ts +++ /dev/null @@ -1,26 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -/** - * Shims for types that we can't always rely on being available globally. - * - * Note: these only exist at the type-level, there is no corresponding runtime - * version for any of these symbols. - */ - -type NeverToAny = T extends never ? any : T; - -/** @ts-ignore */ -type _DOMReadableStream = globalThis.ReadableStream; - -/** @ts-ignore */ -type _NodeReadableStream = import('stream/web').ReadableStream; - -type _ConditionalNodeReadableStream = - typeof globalThis extends { ReadableStream: any } ? never : _NodeReadableStream; - -type _ReadableStream = NeverToAny< - | ([0] extends [1 & _DOMReadableStream] ? never : _DOMReadableStream) - | ([0] extends [1 & _ConditionalNodeReadableStream] ? never : _ConditionalNodeReadableStream) ->; - -export type { _ReadableStream as ReadableStream }; diff --git a/src/internal/shims.ts b/src/internal/shims.ts deleted file mode 100644 index d058450..0000000 --- a/src/internal/shims.ts +++ /dev/null @@ -1,107 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -/** - * This module provides internal shims and utility functions for environments where certain Node.js or global types may not be available. - * - * These are used to ensure we can provide a consistent behaviour between different JavaScript environments and good error - * messages in cases where an environment isn't fully supported. 
- */ - -import type { Fetch } from './builtin-types'; -import type { ReadableStream } from './shim-types'; - -export function getDefaultFetch(): Fetch { - if (typeof fetch !== 'undefined') { - return fetch as any; - } - - throw new Error( - '`fetch` is not defined as a global; Either pass `fetch` to the client, `new LlamaStackClient({ fetch })` or polyfill the global, `globalThis.fetch = fetch`', - ); -} - -type ReadableStreamArgs = ConstructorParameters; - -export function makeReadableStream(...args: ReadableStreamArgs): ReadableStream { - const ReadableStream = (globalThis as any).ReadableStream; - if (typeof ReadableStream === 'undefined') { - // Note: All of the platforms / runtimes we officially support already define - // `ReadableStream` as a global, so this should only ever be hit on unsupported runtimes. - throw new Error( - '`ReadableStream` is not defined as a global; You will need to polyfill it, `globalThis.ReadableStream = ReadableStream`', - ); - } - - return new ReadableStream(...args); -} - -export function ReadableStreamFrom(iterable: Iterable | AsyncIterable): ReadableStream { - let iter: AsyncIterator | Iterator = - Symbol.asyncIterator in iterable ? iterable[Symbol.asyncIterator]() : iterable[Symbol.iterator](); - - return makeReadableStream({ - start() {}, - async pull(controller: any) { - const { done, value } = await iter.next(); - if (done) { - controller.close(); - } else { - controller.enqueue(value); - } - }, - async cancel() { - await iter.return?.(); - }, - }); -} - -/** - * Most browsers don't yet have async iterable support for ReadableStream, - * and Node has a very different way of reading bytes from its "ReadableStream". 
- * - * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490 - */ -export function ReadableStreamToAsyncIterable(stream: any): AsyncIterableIterator { - if (stream[Symbol.asyncIterator]) return stream; - - const reader = stream.getReader(); - return { - async next() { - try { - const result = await reader.read(); - if (result?.done) reader.releaseLock(); // release lock when stream becomes closed - return result; - } catch (e) { - reader.releaseLock(); // release lock when stream becomes errored - throw e; - } - }, - async return() { - const cancelPromise = reader.cancel(); - reader.releaseLock(); - await cancelPromise; - return { done: true, value: undefined }; - }, - [Symbol.asyncIterator]() { - return this; - }, - }; -} - -/** - * Cancels a ReadableStream we don't need to consume. - * See https://undici.nodejs.org/#/?id=garbage-collection - */ -export async function CancelReadableStream(stream: any): Promise { - if (stream === null || typeof stream !== 'object') return; - - if (stream[Symbol.asyncIterator]) { - await stream[Symbol.asyncIterator]().return?.(); - return; - } - - const reader = stream.getReader(); - const cancelPromise = reader.cancel(); - reader.releaseLock(); - await cancelPromise; -} diff --git a/src/internal/stream-utils.ts b/src/internal/stream-utils.ts new file mode 100644 index 0000000..37f7793 --- /dev/null +++ b/src/internal/stream-utils.ts @@ -0,0 +1,32 @@ +/** + * Most browsers don't yet have async iterable support for ReadableStream, + * and Node has a very different way of reading bytes from its "ReadableStream". 
+ * + * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490 + */ +export function ReadableStreamToAsyncIterable(stream: any): AsyncIterableIterator { + if (stream[Symbol.asyncIterator]) return stream; + + const reader = stream.getReader(); + return { + async next() { + try { + const result = await reader.read(); + if (result?.done) reader.releaseLock(); // release lock when stream becomes closed + return result; + } catch (e) { + reader.releaseLock(); // release lock when stream becomes errored + throw e; + } + }, + async return() { + const cancelPromise = reader.cancel(); + reader.releaseLock(); + await cancelPromise; + return { done: true, value: undefined }; + }, + [Symbol.asyncIterator]() { + return this; + }, + }; +} diff --git a/src/internal/to-file.ts b/src/internal/to-file.ts deleted file mode 100644 index 245e849..0000000 --- a/src/internal/to-file.ts +++ /dev/null @@ -1,154 +0,0 @@ -import { BlobPart, getName, makeFile, isAsyncIterable } from './uploads'; -import type { FilePropertyBag } from './builtin-types'; -import { checkFileSupport } from './uploads'; - -type BlobLikePart = string | ArrayBuffer | ArrayBufferView | BlobLike | DataView; - -/** - * Intended to match DOM Blob, node-fetch Blob, node:buffer Blob, etc. 
- * Don't add arrayBuffer here, node-fetch doesn't have it - */ -interface BlobLike { - /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/size) */ - readonly size: number; - /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/type) */ - readonly type: string; - /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/text) */ - text(): Promise; - /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/slice) */ - slice(start?: number, end?: number): BlobLike; -} - -/** - * This check adds the arrayBuffer() method type because it is available and used at runtime - */ -const isBlobLike = (value: any): value is BlobLike & { arrayBuffer(): Promise } => - value != null && - typeof value === 'object' && - typeof value.size === 'number' && - typeof value.type === 'string' && - typeof value.text === 'function' && - typeof value.slice === 'function' && - typeof value.arrayBuffer === 'function'; - -/** - * Intended to match DOM File, node:buffer File, undici File, etc. - */ -interface FileLike extends BlobLike { - /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/lastModified) */ - readonly lastModified: number; - /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/name) */ - readonly name?: string | undefined; -} - -/** - * This check adds the arrayBuffer() method type because it is available and used at runtime - */ -const isFileLike = (value: any): value is FileLike & { arrayBuffer(): Promise } => - value != null && - typeof value === 'object' && - typeof value.name === 'string' && - typeof value.lastModified === 'number' && - isBlobLike(value); - -/** - * Intended to match DOM Response, node-fetch Response, undici Response, etc. 
- */ -export interface ResponseLike { - url: string; - blob(): Promise; -} - -const isResponseLike = (value: any): value is ResponseLike => - value != null && - typeof value === 'object' && - typeof value.url === 'string' && - typeof value.blob === 'function'; - -export type ToFileInput = - | FileLike - | ResponseLike - | Exclude - | AsyncIterable; - -/** - * Helper for creating a {@link File} to pass to an SDK upload method from a variety of different data formats - * @param value the raw content of the file. Can be an {@link Uploadable}, {@link BlobLikePart}, or {@link AsyncIterable} of {@link BlobLikePart}s - * @param {string=} name the name of the file. If omitted, toFile will try to determine a file name from bits if possible - * @param {Object=} options additional properties - * @param {string=} options.type the MIME type of the content - * @param {number=} options.lastModified the last modified timestamp - * @returns a {@link File} with the given properties - */ -export async function toFile( - value: ToFileInput | PromiseLike, - name?: string | null | undefined, - options?: FilePropertyBag | undefined, -): Promise { - checkFileSupport(); - - // If it's a promise, resolve it. 
- value = await value; - - // If we've been given a `File` we don't need to do anything - if (isFileLike(value)) { - if (value instanceof File) { - return value; - } - return makeFile([await value.arrayBuffer()], value.name); - } - - if (isResponseLike(value)) { - const blob = await value.blob(); - name ||= new URL(value.url).pathname.split(/[\\/]/).pop(); - - return makeFile(await getBytes(blob), name, options); - } - - const parts = await getBytes(value); - - name ||= getName(value); - - if (!options?.type) { - const type = parts.find((part) => typeof part === 'object' && 'type' in part && part.type); - if (typeof type === 'string') { - options = { ...options, type }; - } - } - - return makeFile(parts, name, options); -} - -async function getBytes(value: BlobLikePart | AsyncIterable): Promise> { - let parts: Array = []; - if ( - typeof value === 'string' || - ArrayBuffer.isView(value) || // includes Uint8Array, Buffer, etc. - value instanceof ArrayBuffer - ) { - parts.push(value); - } else if (isBlobLike(value)) { - parts.push(value instanceof Blob ? value : await value.arrayBuffer()); - } else if ( - isAsyncIterable(value) // includes Readable, ReadableStream, etc. - ) { - for await (const chunk of value) { - parts.push(...(await getBytes(chunk as BlobLikePart))); // TODO, consider validating? - } - } else { - const constructor = value?.constructor?.name; - throw new Error( - `Unexpected data type: ${typeof value}${ - constructor ? 
`; constructor: ${constructor}` : '' - }${propsForError(value)}`, - ); - } - - return parts; -} - -function propsForError(value: unknown): string { - if (typeof value !== 'object' || value === null) return ''; - const props = Object.getOwnPropertyNames(value); - return `; props: [${props.map((p) => `"${p}"`).join(', ')}]`; -} diff --git a/src/internal/types.ts b/src/internal/types.ts deleted file mode 100644 index b668dfc..0000000 --- a/src/internal/types.ts +++ /dev/null @@ -1,95 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export type PromiseOrValue = T | Promise; -export type HTTPMethod = 'get' | 'post' | 'put' | 'patch' | 'delete'; - -export type KeysEnum = { [P in keyof Required]: true }; - -export type FinalizedRequestInit = RequestInit & { headers: Headers }; - -type NotAny = [0] extends [1 & T] ? never : T; - -/** - * Some environments overload the global fetch function, and Parameters only gets the last signature. - */ -type OverloadedParameters = - T extends ( - { - (...args: infer A): unknown; - (...args: infer B): unknown; - (...args: infer C): unknown; - (...args: infer D): unknown; - } - ) ? - A | B | C | D - : T extends ( - { - (...args: infer A): unknown; - (...args: infer B): unknown; - (...args: infer C): unknown; - } - ) ? - A | B | C - : T extends ( - { - (...args: infer A): unknown; - (...args: infer B): unknown; - } - ) ? - A | B - : T extends (...args: infer A) => unknown ? A - : never; - -/* eslint-disable */ -/** - * These imports attempt to get types from a parent package's dependencies. - * Unresolved bare specifiers can trigger [automatic type acquisition][1] in some projects, which - * would cause typescript to show types not present at runtime. To avoid this, we import - * directly from parent node_modules folders. - * - * We need to check multiple levels because we don't know what directory structure we'll be in. 
- * For example, pnpm generates directories like this: - * ``` - * node_modules - * ├── .pnpm - * │ └── pkg@1.0.0 - * │ └── node_modules - * │ └── pkg - * │ └── internal - * │ └── types.d.ts - * ├── pkg -> .pnpm/pkg@1.0.0/node_modules/pkg - * └── undici - * ``` - * - * [1]: https://www.typescriptlang.org/tsconfig/#typeAcquisition - */ -/** @ts-ignore For users with \@types/node */ -type UndiciTypesRequestInit = NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny; -/** @ts-ignore For users with undici */ -type UndiciRequestInit = NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny; -/** @ts-ignore For users with \@types/bun */ -type BunRequestInit = globalThis.FetchRequestInit; -/** @ts-ignore For users with node-fetch@2 */ -type NodeFetch2RequestInit = NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny; -/** @ts-ignore For users with node-fetch@3, doesn't need file extension because types are at ./@types/index.d.ts */ -type NodeFetch3RequestInit = NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny | NotAny; -/** @ts-ignore For users who use Deno */ -type FetchRequestInit = NonNullable[1]>; -/* eslint-enable */ - -type RequestInits = - | NotAny - | NotAny - | NotAny - | NotAny - | NotAny - | NotAny - | NotAny; - -/** - * This type contains `RequestInit` options that may be available on the current runtime, - * including per-platform extensions like `dispatcher`, `agent`, `client`, etc. - */ -export type MergedRequestInit = RequestInits & - /** We don't include these in the types as they'll be overridden for every request. 
*/ - Partial>; diff --git a/src/internal/uploads.ts b/src/internal/uploads.ts deleted file mode 100644 index ffaae4f..0000000 --- a/src/internal/uploads.ts +++ /dev/null @@ -1,187 +0,0 @@ -import { type RequestOptions } from './request-options'; -import type { FilePropertyBag, Fetch } from './builtin-types'; -import type { LlamaStackClient } from '../client'; -import { ReadableStreamFrom } from './shims'; - -export type BlobPart = string | ArrayBuffer | ArrayBufferView | Blob | DataView; -type FsReadStream = AsyncIterable & { path: string | { toString(): string } }; - -// https://github.com/oven-sh/bun/issues/5980 -interface BunFile extends Blob { - readonly name?: string | undefined; -} - -export const checkFileSupport = () => { - if (typeof File === 'undefined') { - const { process } = globalThis as any; - const isOldNode = - typeof process?.versions?.node === 'string' && parseInt(process.versions.node.split('.')) < 20; - throw new Error( - '`File` is not defined as a global, which is required for file uploads.' + - (isOldNode ? - " Update to Node 20 LTS or newer, or set `globalThis.File` to `import('node:buffer').File`." - : ''), - ); - } -}; - -/** - * Typically, this is a native "File" class. - * - * We provide the {@link toFile} utility to convert a variety of objects - * into the File class. - * - * For convenience, you can also pass a fetch Response, or in Node, - * the result of fs.createReadStream(). - */ -export type Uploadable = File | Response | FsReadStream | BunFile; - -/** - * Construct a `File` instance. This is used to ensure a helpful error is thrown - * for environments that don't define a global `File` yet. - */ -export function makeFile( - fileBits: BlobPart[], - fileName: string | undefined, - options?: FilePropertyBag, -): File { - checkFileSupport(); - return new File(fileBits as any, fileName ?? 
'unknown_file', options); -} - -export function getName(value: any): string | undefined { - return ( - ( - (typeof value === 'object' && - value !== null && - (('name' in value && value.name && String(value.name)) || - ('url' in value && value.url && String(value.url)) || - ('filename' in value && value.filename && String(value.filename)) || - ('path' in value && value.path && String(value.path)))) || - '' - ) - .split(/[\\/]/) - .pop() || undefined - ); -} - -export const isAsyncIterable = (value: any): value is AsyncIterable => - value != null && typeof value === 'object' && typeof value[Symbol.asyncIterator] === 'function'; - -/** - * Returns a multipart/form-data request if any part of the given request body contains a File / Blob value. - * Otherwise returns the request as is. - */ -export const maybeMultipartFormRequestOptions = async ( - opts: RequestOptions, - fetch: LlamaStackClient | Fetch, -): Promise => { - if (!hasUploadableValue(opts.body)) return opts; - - return { ...opts, body: await createForm(opts.body, fetch) }; -}; - -type MultipartFormRequestOptions = Omit & { body: unknown }; - -export const multipartFormRequestOptions = async ( - opts: MultipartFormRequestOptions, - fetch: LlamaStackClient | Fetch, -): Promise => { - return { ...opts, body: await createForm(opts.body, fetch) }; -}; - -const supportsFormDataMap = /** @__PURE__ */ new WeakMap>(); - -/** - * node-fetch doesn't support the global FormData object in recent node versions. Instead of sending - * properly-encoded form data, it just stringifies the object, resulting in a request body of "[object FormData]". - * This function detects if the fetch function provided supports the global FormData object to avoid - * confusing error messages later on. - */ -function supportsFormData(fetchObject: LlamaStackClient | Fetch): Promise { - const fetch: Fetch = typeof fetchObject === 'function' ? 
fetchObject : (fetchObject as any).fetch; - const cached = supportsFormDataMap.get(fetch); - if (cached) return cached; - const promise = (async () => { - try { - const FetchResponse = ( - 'Response' in fetch ? - fetch.Response - : (await fetch('data:,')).constructor) as typeof Response; - const data = new FormData(); - if (data.toString() === (await new FetchResponse(data).text())) { - return false; - } - return true; - } catch { - // avoid false negatives - return true; - } - })(); - supportsFormDataMap.set(fetch, promise); - return promise; -} - -export const createForm = async >( - body: T | undefined, - fetch: LlamaStackClient | Fetch, -): Promise => { - if (!(await supportsFormData(fetch))) { - throw new TypeError( - 'The provided fetch function does not support file uploads with the current global FormData class.', - ); - } - const form = new FormData(); - await Promise.all(Object.entries(body || {}).map(([key, value]) => addFormValue(form, key, value))); - return form; -}; - -// We check for Blob not File because Bun.File doesn't inherit from File, -// but they both inherit from Blob and have a `name` property at runtime. 
-const isNamedBlob = (value: unknown) => value instanceof Blob && 'name' in value; - -const isUploadable = (value: unknown) => - typeof value === 'object' && - value !== null && - (value instanceof Response || isAsyncIterable(value) || isNamedBlob(value)); - -const hasUploadableValue = (value: unknown): boolean => { - if (isUploadable(value)) return true; - if (Array.isArray(value)) return value.some(hasUploadableValue); - if (value && typeof value === 'object') { - for (const k in value) { - if (hasUploadableValue((value as any)[k])) return true; - } - } - return false; -}; - -const addFormValue = async (form: FormData, key: string, value: unknown): Promise => { - if (value === undefined) return; - if (value == null) { - throw new TypeError( - `Received null for "${key}"; to pass null in FormData, you must use the string 'null'`, - ); - } - - // TODO: make nested formats configurable - if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') { - form.append(key, String(value)); - } else if (value instanceof Response) { - form.append(key, makeFile([await value.blob()], getName(value))); - } else if (isAsyncIterable(value)) { - form.append(key, makeFile([await new Response(ReadableStreamFrom(value)).blob()], getName(value))); - } else if (isNamedBlob(value)) { - form.append(key, value, getName(value)); - } else if (Array.isArray(value)) { - await Promise.all(value.map((entry) => addFormValue(form, key + '[]', entry))); - } else if (typeof value === 'object') { - await Promise.all( - Object.entries(value).map(([name, prop]) => addFormValue(form, `${key}[${name}]`, prop)), - ); - } else { - throw new TypeError( - `Invalid value given to form, expected a string, number, boolean, object, Array, File or Blob but got ${value} instead`, - ); - } -}; diff --git a/src/internal/utils.ts b/src/internal/utils.ts deleted file mode 100644 index 3cbfacc..0000000 --- a/src/internal/utils.ts +++ /dev/null @@ -1,8 +0,0 @@ -// File generated from our 
OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export * from './utils/values'; -export * from './utils/base64'; -export * from './utils/env'; -export * from './utils/log'; -export * from './utils/uuid'; -export * from './utils/sleep'; diff --git a/src/internal/utils/base64.ts b/src/internal/utils/base64.ts deleted file mode 100644 index 7c0ee30..0000000 --- a/src/internal/utils/base64.ts +++ /dev/null @@ -1,40 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { LlamaStackClientError } from '../../core/error'; -import { encodeUTF8 } from './bytes'; - -export const toBase64 = (data: string | Uint8Array | null | undefined): string => { - if (!data) return ''; - - if (typeof (globalThis as any).Buffer !== 'undefined') { - return (globalThis as any).Buffer.from(data).toString('base64'); - } - - if (typeof data === 'string') { - data = encodeUTF8(data); - } - - if (typeof btoa !== 'undefined') { - return btoa(String.fromCharCode.apply(null, data as any)); - } - - throw new LlamaStackClientError('Cannot generate base64 string; Expected `Buffer` or `btoa` to be defined'); -}; - -export const fromBase64 = (str: string): Uint8Array => { - if (typeof (globalThis as any).Buffer !== 'undefined') { - const buf = (globalThis as any).Buffer.from(str, 'base64'); - return new Uint8Array(buf.buffer, buf.byteOffset, buf.byteLength); - } - - if (typeof atob !== 'undefined') { - const bstr = atob(str); - const buf = new Uint8Array(bstr.length); - for (let i = 0; i < bstr.length; i++) { - buf[i] = bstr.charCodeAt(i); - } - return buf; - } - - throw new LlamaStackClientError('Cannot decode base64 string; Expected `Buffer` or `atob` to be defined'); -}; diff --git a/src/internal/utils/bytes.ts b/src/internal/utils/bytes.ts deleted file mode 100644 index 8da627a..0000000 --- a/src/internal/utils/bytes.ts +++ /dev/null @@ -1,32 +0,0 @@ -export function concatBytes(buffers: Uint8Array[]): Uint8Array { - let length = 0; - 
for (const buffer of buffers) { - length += buffer.length; - } - const output = new Uint8Array(length); - let index = 0; - for (const buffer of buffers) { - output.set(buffer, index); - index += buffer.length; - } - - return output; -} - -let encodeUTF8_: (str: string) => Uint8Array; -export function encodeUTF8(str: string) { - let encoder; - return ( - encodeUTF8_ ?? - ((encoder = new (globalThis as any).TextEncoder()), (encodeUTF8_ = encoder.encode.bind(encoder))) - )(str); -} - -let decodeUTF8_: (bytes: Uint8Array) => string; -export function decodeUTF8(bytes: Uint8Array) { - let decoder; - return ( - decodeUTF8_ ?? - ((decoder = new (globalThis as any).TextDecoder()), (decodeUTF8_ = decoder.decode.bind(decoder))) - )(bytes); -} diff --git a/src/internal/utils/env.ts b/src/internal/utils/env.ts deleted file mode 100644 index 2d84800..0000000 --- a/src/internal/utils/env.ts +++ /dev/null @@ -1,18 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -/** - * Read an environment variable. - * - * Trims beginning and trailing whitespace. - * - * Will return undefined if the environment variable doesn't exist or cannot be accessed. - */ -export const readEnv = (env: string): string | undefined => { - if (typeof (globalThis as any).process !== 'undefined') { - return (globalThis as any).process.env?.[env]?.trim() ?? undefined; - } - if (typeof (globalThis as any).Deno !== 'undefined') { - return (globalThis as any).Deno.env?.get?.(env)?.trim(); - } - return undefined; -}; diff --git a/src/internal/utils/log.ts b/src/internal/utils/log.ts deleted file mode 100644 index c0c00a9..0000000 --- a/src/internal/utils/log.ts +++ /dev/null @@ -1,126 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { hasOwn } from './values'; -import { type LlamaStackClient } from '../../client'; -import { RequestOptions } from '../request-options'; - -type LogFn = (message: string, ...rest: unknown[]) => void; -export type Logger = { - error: LogFn; - warn: LogFn; - info: LogFn; - debug: LogFn; -}; -export type LogLevel = 'off' | 'error' | 'warn' | 'info' | 'debug'; - -const levelNumbers = { - off: 0, - error: 200, - warn: 300, - info: 400, - debug: 500, -}; - -export const parseLogLevel = ( - maybeLevel: string | undefined, - sourceName: string, - client: LlamaStackClient, -): LogLevel | undefined => { - if (!maybeLevel) { - return undefined; - } - if (hasOwn(levelNumbers, maybeLevel)) { - return maybeLevel; - } - loggerFor(client).warn( - `${sourceName} was set to ${JSON.stringify(maybeLevel)}, expected one of ${JSON.stringify( - Object.keys(levelNumbers), - )}`, - ); - return undefined; -}; - -function noop() {} - -function makeLogFn(fnLevel: keyof Logger, logger: Logger | undefined, logLevel: LogLevel) { - if (!logger || levelNumbers[fnLevel] > levelNumbers[logLevel]) { - return noop; - } else { - // Don't wrap logger functions, we want the stacktrace intact! - return logger[fnLevel].bind(logger); - } -} - -const noopLogger = { - error: noop, - warn: noop, - info: noop, - debug: noop, -}; - -let cachedLoggers = /** @__PURE__ */ new WeakMap(); - -export function loggerFor(client: LlamaStackClient): Logger { - const logger = client.logger; - const logLevel = client.logLevel ?? 
'off'; - if (!logger) { - return noopLogger; - } - - const cachedLogger = cachedLoggers.get(logger); - if (cachedLogger && cachedLogger[0] === logLevel) { - return cachedLogger[1]; - } - - const levelLogger = { - error: makeLogFn('error', logger, logLevel), - warn: makeLogFn('warn', logger, logLevel), - info: makeLogFn('info', logger, logLevel), - debug: makeLogFn('debug', logger, logLevel), - }; - - cachedLoggers.set(logger, [logLevel, levelLogger]); - - return levelLogger; -} - -export const formatRequestDetails = (details: { - options?: RequestOptions | undefined; - headers?: Headers | Record | undefined; - retryOfRequestLogID?: string | undefined; - retryOf?: string | undefined; - url?: string | undefined; - status?: number | undefined; - method?: string | undefined; - durationMs?: number | undefined; - message?: unknown; - body?: unknown; -}) => { - if (details.options) { - details.options = { ...details.options }; - delete details.options['headers']; // redundant + leaks internals - } - if (details.headers) { - details.headers = Object.fromEntries( - (details.headers instanceof Headers ? [...details.headers] : Object.entries(details.headers)).map( - ([name, value]) => [ - name, - ( - name.toLowerCase() === 'authorization' || - name.toLowerCase() === 'cookie' || - name.toLowerCase() === 'set-cookie' - ) ? - '***' - : value, - ], - ), - ); - } - if ('retryOfRequestLogID' in details) { - if (details.retryOfRequestLogID) { - details.retryOf = details.retryOfRequestLogID; - } - delete details.retryOfRequestLogID; - } - return details; -}; diff --git a/src/internal/utils/path.ts b/src/internal/utils/path.ts deleted file mode 100644 index 35c67bc..0000000 --- a/src/internal/utils/path.ts +++ /dev/null @@ -1,65 +0,0 @@ -import { LlamaStackClientError } from '../../core/error'; - -/** - * Percent-encode everything that isn't safe to have in a path without encoding safe chars. 
- * - * Taken from https://datatracker.ietf.org/doc/html/rfc3986#section-3.3: - * > unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" - * > sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" - * > pchar = unreserved / pct-encoded / sub-delims / ":" / "@" - */ -export function encodeURIPath(str: string) { - return str.replace(/[^A-Za-z0-9\-._~!$&'()*+,;=:@]+/g, encodeURIComponent); -} - -export const createPathTagFunction = (pathEncoder = encodeURIPath) => - function path(statics: readonly string[], ...params: readonly unknown[]): string { - // If there are no params, no processing is needed. - if (statics.length === 1) return statics[0]!; - - let postPath = false; - const path = statics.reduce((previousValue, currentValue, index) => { - if (/[?#]/.test(currentValue)) { - postPath = true; - } - return ( - previousValue + - currentValue + - (index === params.length ? '' : (postPath ? encodeURIComponent : pathEncoder)(String(params[index]))) - ); - }, ''); - - const pathOnly = path.split(/[?#]/, 1)[0]!; - const invalidSegments = []; - const invalidSegmentPattern = /(?<=^|\/)(?:\.|%2e){1,2}(?=\/|$)/gi; - let match; - - // Find all invalid segments - while ((match = invalidSegmentPattern.exec(pathOnly)) !== null) { - invalidSegments.push({ - start: match.index, - length: match[0].length, - }); - } - - if (invalidSegments.length > 0) { - let lastEnd = 0; - const underline = invalidSegments.reduce((acc, segment) => { - const spaces = ' '.repeat(segment.start - lastEnd); - const arrows = '^'.repeat(segment.length); - lastEnd = segment.start + segment.length; - return acc + spaces + arrows; - }, ''); - - throw new LlamaStackClientError( - `Path parameters result in path with invalid segments:\n${path}\n${underline}`, - ); - } - - return path; - }; - -/** - * URI-encodes path params and ensures no unsafe /./ or /../ path segments are introduced. 
- */ -export const path = /* @__PURE__ */ createPathTagFunction(encodeURIPath); diff --git a/src/internal/utils/sleep.ts b/src/internal/utils/sleep.ts deleted file mode 100644 index 65e5296..0000000 --- a/src/internal/utils/sleep.ts +++ /dev/null @@ -1,3 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)); diff --git a/src/internal/utils/uuid.ts b/src/internal/utils/uuid.ts deleted file mode 100644 index b0e53aa..0000000 --- a/src/internal/utils/uuid.ts +++ /dev/null @@ -1,17 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -/** - * https://stackoverflow.com/a/2117523 - */ -export let uuid4 = function () { - const { crypto } = globalThis as any; - if (crypto?.randomUUID) { - uuid4 = crypto.randomUUID.bind(crypto); - return crypto.randomUUID(); - } - const u8 = new Uint8Array(1); - const randomByte = crypto ? () => crypto.getRandomValues(u8)[0]! : () => (Math.random() * 0xff) & 0xff; - return '10000000-1000-4000-8000-100000000000'.replace(/[018]/g, (c) => - (+c ^ (randomByte() & (15 >> (+c / 4)))).toString(16), - ); -}; diff --git a/src/internal/utils/values.ts b/src/internal/utils/values.ts deleted file mode 100644 index 256b2f6..0000000 --- a/src/internal/utils/values.ts +++ /dev/null @@ -1,105 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { LlamaStackClientError } from '../../core/error'; - -// https://url.spec.whatwg.org/#url-scheme-string -const startsWithSchemeRegexp = /^[a-z][a-z0-9+.-]*:/i; - -export const isAbsoluteURL = (url: string): boolean => { - return startsWithSchemeRegexp.test(url); -}; - -export let isArray = (val: unknown): val is unknown[] => ((isArray = Array.isArray), isArray(val)); -export let isReadonlyArray = isArray as (val: unknown) => val is readonly unknown[]; - -/** Returns an object if the given value isn't an object, otherwise returns as-is */ -export function maybeObj(x: unknown): object { - if (typeof x !== 'object') { - return {}; - } - - return x ?? {}; -} - -// https://stackoverflow.com/a/34491287 -export function isEmptyObj(obj: Object | null | undefined): boolean { - if (!obj) return true; - for (const _k in obj) return false; - return true; -} - -// https://eslint.org/docs/latest/rules/no-prototype-builtins -export function hasOwn(obj: T, key: PropertyKey): key is keyof T { - return Object.prototype.hasOwnProperty.call(obj, key); -} - -export function isObj(obj: unknown): obj is Record { - return obj != null && typeof obj === 'object' && !Array.isArray(obj); -} - -export const ensurePresent = (value: T | null | undefined): T => { - if (value == null) { - throw new LlamaStackClientError(`Expected a value to be given but received ${value} instead.`); - } - - return value; -}; - -export const validatePositiveInteger = (name: string, n: unknown): number => { - if (typeof n !== 'number' || !Number.isInteger(n)) { - throw new LlamaStackClientError(`${name} must be an integer`); - } - if (n < 0) { - throw new LlamaStackClientError(`${name} must be a positive integer`); - } - return n; -}; - -export const coerceInteger = (value: unknown): number => { - if (typeof value === 'number') return Math.round(value); - if (typeof value === 'string') return parseInt(value, 10); - - throw new LlamaStackClientError(`Could not coerce ${value} (type: ${typeof value}) into a 
number`); -}; - -export const coerceFloat = (value: unknown): number => { - if (typeof value === 'number') return value; - if (typeof value === 'string') return parseFloat(value); - - throw new LlamaStackClientError(`Could not coerce ${value} (type: ${typeof value}) into a number`); -}; - -export const coerceBoolean = (value: unknown): boolean => { - if (typeof value === 'boolean') return value; - if (typeof value === 'string') return value === 'true'; - return Boolean(value); -}; - -export const maybeCoerceInteger = (value: unknown): number | undefined => { - if (value === undefined) { - return undefined; - } - return coerceInteger(value); -}; - -export const maybeCoerceFloat = (value: unknown): number | undefined => { - if (value === undefined) { - return undefined; - } - return coerceFloat(value); -}; - -export const maybeCoerceBoolean = (value: unknown): boolean | undefined => { - if (value === undefined) { - return undefined; - } - return coerceBoolean(value); -}; - -export const safeJSON = (text: string) => { - try { - return JSON.parse(text); - } catch (err) { - return undefined; - } -}; diff --git a/src/pagination.ts b/src/pagination.ts new file mode 100644 index 0000000..9e0d28e --- /dev/null +++ b/src/pagination.ts @@ -0,0 +1,62 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { AbstractPage, Response, APIClient, FinalRequestOptions, PageInfo } from './core'; + +export interface DatasetsIterrowsResponse { + data: Array; + + next_index: number; +} + +export interface DatasetsIterrowsParams { + dataset_id?: string; + + start_index?: number; + + limit?: number; +} + +export class DatasetsIterrows extends AbstractPage implements DatasetsIterrowsResponse { + data: Array; + + next_index: number; + + constructor( + client: APIClient, + response: Response, + body: DatasetsIterrowsResponse, + options: FinalRequestOptions, + ) { + super(client, response, body, options); + + this.data = body.data || []; + this.next_index = body.next_index || 0; + } + + getPaginatedItems(): Item[] { + return this.data ?? []; + } + + // @deprecated Please use `nextPageInfo()` instead + nextPageParams(): Partial | null { + const info = this.nextPageInfo(); + if (!info) return null; + if ('params' in info) return info.params; + const params = Object.fromEntries(info.url.searchParams); + if (!Object.keys(params).length) return null; + return params; + } + + nextPageInfo(): PageInfo | null { + const cursor = this.next_index; + if (!cursor) { + return null; + } + + return { + params: { + start_index: cursor, + }, + }; + } +} diff --git a/src/resource.ts b/src/resource.ts index 363e351..eafa8c0 100644 --- a/src/resource.ts +++ b/src/resource.ts @@ -1,2 +1,11 @@ -/** @deprecated Import from ./core/resource instead */ -export * from './core/resource'; +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import type { LlamaStackClient } from './index'; + +export abstract class APIResource { + protected _client: LlamaStackClient; + + constructor(client: LlamaStackClient) { + this._client = client; + } +} diff --git a/src/resources/agents/agents.ts b/src/resources/agents/agents.ts index e4709a7..d845b30 100644 --- a/src/resources/agents/agents.ts +++ b/src/resources/agents/agents.ts @@ -1,175 +1,339 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../core/resource'; -import * as InferenceAPI from '../inference'; -import * as ToolRuntimeAPI from '../tool-runtime/tool-runtime'; -import * as SessionAPI from './session/session'; +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as Shared from '../shared'; +import * as SessionAPI from './session'; import { Session, SessionCreateParams, SessionCreateResponse, - SessionDeleteParams, + SessionListParams, + SessionListResponse, SessionResource, SessionRetrieveParams, -} from './session/session'; -import * as TurnAPI from './session/turn/turn'; -import { APIPromise } from '../../core/api-promise'; -import { buildHeaders } from '../../internal/headers'; -import { RequestOptions } from '../../internal/request-options'; -import { path } from '../../internal/utils/path'; +} from './session'; +import * as StepsAPI from './steps'; +import { StepRetrieveResponse, Steps } from './steps'; +import * as TurnAPI from './turn'; +import { + AgentTurnResponseStreamChunk, + Turn, + TurnCreateParams, + TurnCreateParamsNonStreaming, + TurnCreateParamsStreaming, + TurnResource, + TurnResponseEvent, + TurnResponseEventPayload, + TurnResumeParams, + TurnResumeParamsNonStreaming, + TurnResumeParamsStreaming, +} from './turn'; export class Agents extends APIResource { session: SessionAPI.SessionResource = new SessionAPI.SessionResource(this._client); + steps: StepsAPI.Steps = new 
StepsAPI.Steps(this._client); + turn: TurnAPI.TurnResource = new TurnAPI.TurnResource(this._client); /** * Create an agent with the given configuration. */ - create(body: AgentCreateParams, options?: RequestOptions): APIPromise { + create(body: AgentCreateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/agents', { body, ...options }); } /** * Describe an agent by its ID. */ - retrieve(agentID: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/agents/${agentID}`, options); + retrieve(agentId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/agents/${agentId}`, options); } /** * List all agents. */ - list(options?: RequestOptions): APIPromise { - return this._client.get('/v1/agents', options); + list(query?: AgentListParams, options?: Core.RequestOptions): Core.APIPromise; + list(options?: Core.RequestOptions): Core.APIPromise; + list( + query: AgentListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return this._client.get('/v1/agents', { query, ...options }); } /** - * Delete an agent by its ID. + * Delete an agent by its ID and its associated sessions and turns. */ - delete(agentID: string, options?: RequestOptions): APIPromise { - return this._client.delete(path`/v1/agents/${agentID}`, { + delete(agentId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1/agents/${agentId}`, { ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), + headers: { Accept: '*/*', ...options?.headers }, }); } +} +/** + * An inference step in an agent turn. + */ +export interface InferenceStep { /** - * List all session(s) of a given agent. + * The response from the LLM. 
*/ - listSessions(agentID: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/agents/${agentID}/sessions`, options); - } + model_response: Shared.CompletionMessage; + + /** + * The ID of the step. + */ + step_id: string; + + /** + * Type of the step in an agent turn. + */ + step_type: 'inference'; + + /** + * The ID of the turn. + */ + turn_id: string; + + /** + * The time the step completed. + */ + completed_at?: string; + + /** + * The time the step started. + */ + started_at?: string; } -export interface Agent { +/** + * A memory retrieval step in an agent turn. + */ +export interface MemoryRetrievalStep { + /** + * The context retrieved from the vector databases. + */ + inserted_context: Shared.InterleavedContent; + /** - * Configuration for an agent. + * The ID of the step. */ - agent_config: AgentConfig; + step_id: string; - agent_id: string; + /** + * Type of the step in an agent turn. + */ + step_type: 'memory_retrieval'; - created_at: string; + /** + * The ID of the turn. + */ + turn_id: string; + + /** + * The IDs of the vector databases to retrieve context from. + */ + vector_db_ids: string; + + /** + * The time the step completed. + */ + completed_at?: string; + + /** + * The time the step started. + */ + started_at?: string; } /** - * Configuration for an agent. + * A shield call step in an agent turn. */ -export interface AgentConfig { +export interface ShieldCallStep { + /** + * The ID of the step. + */ + step_id: string; + /** - * The system instructions for the agent + * Type of the step in an agent turn. */ - instructions: string; + step_type: 'shield_call'; /** - * The model identifier to use for the agent + * The ID of the turn. */ - model: string; + turn_id: string; - client_tools?: Array; + /** + * The time the step completed. + */ + completed_at?: string; /** - * Optional flag indicating whether session data has to be persisted + * The time the step started. 
*/ - enable_session_persistence?: boolean; + started_at?: string; - input_shields?: Array; + /** + * The violation from the shield call. + */ + violation?: Shared.SafetyViolation; +} - max_infer_iters?: number; +/** + * A tool execution step in an agent turn. + */ +export interface ToolExecutionStep { + /** + * The ID of the step. + */ + step_id: string; /** - * Optional name for the agent, used in telemetry and identification + * Type of the step in an agent turn. */ - name?: string; + step_type: 'tool_execution'; - output_shields?: Array; + /** + * The tool calls to execute. + */ + tool_calls: Array; /** - * Optional response format configuration + * The tool responses from the tool calls. */ - response_format?: InferenceAPI.ResponseFormat; + tool_responses: Array; /** - * Sampling parameters. + * The ID of the turn. */ - sampling_params?: InferenceAPI.SamplingParams; + turn_id: string; /** - * @deprecated Whether tool use is required or automatic. This is a hint to the - * model which may not be followed. It depends on the Instruction Following - * capabilities of the model. + * The time the step completed. */ - tool_choice?: 'auto' | 'required' | 'none'; + completed_at?: string; /** - * Configuration for tool use. + * The time the step started. */ - tool_config?: InferenceAPI.ToolConfig; + started_at?: string; +} + +export interface ToolResponse { + call_id: string; /** - * @deprecated Prompt format for calling custom / zero shot tools. + * A image content item */ - tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; + content: Shared.InterleavedContent; + + tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); - toolgroups?: Array; + metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; } export interface AgentCreateResponse { agent_id: string; } -export interface AgentListResponse { - data: Array; +export interface AgentRetrieveResponse { + /** + * Configuration for an agent. 
+ */ + agent_config: Shared.AgentConfig; + + agent_id: string; + + created_at: string; } -export interface AgentListSessionsResponse { - data: Array; +/** + * A generic paginated response that follows a simple format. + */ +export interface AgentListResponse { + /** + * The list of items for the current page + */ + data: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + /** + * Whether there are more items available after this set + */ + has_more: boolean; + + /** + * The URL for accessing this list + */ + url?: string; } export interface AgentCreateParams { /** * The configuration for the agent. */ - agent_config: AgentConfig; + agent_config: Shared.AgentConfig; +} + +export interface AgentListParams { + /** + * The number of agents to return. + */ + limit?: number; + + /** + * The index to start the pagination from. + */ + start_index?: number; } Agents.SessionResource = SessionResource; +Agents.Steps = Steps; +Agents.TurnResource = TurnResource; export declare namespace Agents { export { - type Agent as Agent, - type AgentConfig as AgentConfig, + type InferenceStep as InferenceStep, + type MemoryRetrievalStep as MemoryRetrievalStep, + type ShieldCallStep as ShieldCallStep, + type ToolExecutionStep as ToolExecutionStep, + type ToolResponse as ToolResponse, type AgentCreateResponse as AgentCreateResponse, + type AgentRetrieveResponse as AgentRetrieveResponse, type AgentListResponse as AgentListResponse, - type AgentListSessionsResponse as AgentListSessionsResponse, type AgentCreateParams as AgentCreateParams, + type AgentListParams as AgentListParams, }; export { SessionResource as SessionResource, type Session as Session, type SessionCreateResponse as SessionCreateResponse, + type SessionListResponse as SessionListResponse, type SessionCreateParams as SessionCreateParams, type SessionRetrieveParams as SessionRetrieveParams, - type SessionDeleteParams as SessionDeleteParams, + type SessionListParams as SessionListParams, + }; + + 
export { Steps as Steps, type StepRetrieveResponse as StepRetrieveResponse }; + + export { + TurnResource as TurnResource, + type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk, + type Turn as Turn, + type TurnResponseEvent as TurnResponseEvent, + type TurnResponseEventPayload as TurnResponseEventPayload, + type TurnCreateParams as TurnCreateParams, + type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming, + type TurnCreateParamsStreaming as TurnCreateParamsStreaming, + type TurnResumeParams as TurnResumeParams, + type TurnResumeParamsNonStreaming as TurnResumeParamsNonStreaming, + type TurnResumeParamsStreaming as TurnResumeParamsStreaming, }; } diff --git a/src/resources/agents/index.ts b/src/resources/agents/index.ts index 10d07ad..88a44bf 100644 --- a/src/resources/agents/index.ts +++ b/src/resources/agents/index.ts @@ -2,18 +2,37 @@ export { Agents, - type Agent, - type AgentConfig, + type InferenceStep, + type MemoryRetrievalStep, + type ShieldCallStep, + type ToolExecutionStep, + type ToolResponse, type AgentCreateResponse, + type AgentRetrieveResponse, type AgentListResponse, - type AgentListSessionsResponse, type AgentCreateParams, + type AgentListParams, } from './agents'; export { SessionResource, type Session, type SessionCreateResponse, + type SessionListResponse, type SessionCreateParams, type SessionRetrieveParams, - type SessionDeleteParams, -} from './session/index'; + type SessionListParams, +} from './session'; +export { Steps, type StepRetrieveResponse } from './steps'; +export { + TurnResource, + type AgentTurnResponseStreamChunk, + type Turn, + type TurnResponseEvent, + type TurnResponseEventPayload, + type TurnCreateParams, + type TurnCreateParamsNonStreaming, + type TurnCreateParamsStreaming, + type TurnResumeParams, + type TurnResumeParamsNonStreaming, + type TurnResumeParamsStreaming, +} from './turn'; diff --git a/src/resources/agents/session.ts b/src/resources/agents/session.ts index 5f4f523..fa6a6c2 100644 --- 
a/src/resources/agents/session.ts +++ b/src/resources/agents/session.ts @@ -1,3 +1,145 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export * from './session/index'; +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as TurnAPI from './turn'; + +export class SessionResource extends APIResource { + /** + * Create a new session for an agent. + */ + create( + agentId: string, + body: SessionCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/agents/${agentId}/session`, { body, ...options }); + } + + /** + * Retrieve an agent session by its ID. + */ + retrieve( + agentId: string, + sessionId: string, + query?: SessionRetrieveParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + retrieve(agentId: string, sessionId: string, options?: Core.RequestOptions): Core.APIPromise; + retrieve( + agentId: string, + sessionId: string, + query: SessionRetrieveParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.retrieve(agentId, sessionId, {}, query); + } + return this._client.get(`/v1/agents/${agentId}/session/${sessionId}`, { query, ...options }); + } + + /** + * List all session(s) of a given agent. + */ + list( + agentId: string, + query?: SessionListParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + list(agentId: string, options?: Core.RequestOptions): Core.APIPromise; + list( + agentId: string, + query: SessionListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.list(agentId, {}, query); + } + return this._client.get(`/v1/agents/${agentId}/sessions`, { query, ...options }); + } + + /** + * Delete an agent session by its ID and its associated turns. 
+ */ + delete(agentId: string, sessionId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1/agents/${agentId}/session/${sessionId}`, { + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); + } +} + +/** + * A single session of an interaction with an Agentic System. + */ +export interface Session { + session_id: string; + + session_name: string; + + started_at: string; + + turns: Array; +} + +export interface SessionCreateResponse { + session_id: string; +} + +/** + * A generic paginated response that follows a simple format. + */ +export interface SessionListResponse { + /** + * The list of items for the current page + */ + data: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + /** + * Whether there are more items available after this set + */ + has_more: boolean; + + /** + * The URL for accessing this list + */ + url?: string; +} + +export interface SessionCreateParams { + /** + * The name of the session to create. + */ + session_name: string; +} + +export interface SessionRetrieveParams { + /** + * (Optional) List of turn IDs to filter the session by. + */ + turn_ids?: Array; +} + +export interface SessionListParams { + /** + * The number of sessions to return. + */ + limit?: number; + + /** + * The index to start the pagination from. 
+ */ + start_index?: number; +} + +export declare namespace SessionResource { + export { + type Session as Session, + type SessionCreateResponse as SessionCreateResponse, + type SessionListResponse as SessionListResponse, + type SessionCreateParams as SessionCreateParams, + type SessionRetrieveParams as SessionRetrieveParams, + type SessionListParams as SessionListParams, + }; +} diff --git a/src/resources/agents/session/index.ts b/src/resources/agents/session/index.ts deleted file mode 100644 index 61c45ad..0000000 --- a/src/resources/agents/session/index.ts +++ /dev/null @@ -1,25 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export { - SessionResource, - type Session, - type SessionCreateResponse, - type SessionCreateParams, - type SessionRetrieveParams, - type SessionDeleteParams, -} from './session'; -export { - TurnResource, - type AgentTool, - type InferenceStep, - type MemoryRetrievalStep, - type ShieldCallStep, - type ToolExecutionStep, - type ToolResponse, - type ToolResponseMessage, - type Turn, - type UserMessage, - type TurnCreateParams, - type TurnRetrieveParams, - type TurnResumeParams, -} from './turn/index'; diff --git a/src/resources/agents/session/session.ts b/src/resources/agents/session/session.ts deleted file mode 100644 index 3948cb8..0000000 --- a/src/resources/agents/session/session.ts +++ /dev/null @@ -1,128 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { APIResource } from '../../../core/resource'; -import * as TurnAPI from './turn/turn'; -import { - AgentTool, - InferenceStep, - MemoryRetrievalStep, - ShieldCallStep, - ToolExecutionStep, - ToolResponse, - ToolResponseMessage, - Turn, - TurnCreateParams, - TurnResource, - TurnResumeParams, - TurnRetrieveParams, - UserMessage, -} from './turn/turn'; -import { APIPromise } from '../../../core/api-promise'; -import { buildHeaders } from '../../../internal/headers'; -import { RequestOptions } from '../../../internal/request-options'; -import { path } from '../../../internal/utils/path'; - -export class SessionResource extends APIResource { - turn: TurnAPI.TurnResource = new TurnAPI.TurnResource(this._client); - - /** - * Create a new session for an agent. - */ - create( - agentID: string, - body: SessionCreateParams, - options?: RequestOptions, - ): APIPromise { - return this._client.post(path`/v1/agents/${agentID}/session`, { body, ...options }); - } - - /** - * Retrieve an agent session by its ID. - */ - retrieve(sessionID: string, params: SessionRetrieveParams, options?: RequestOptions): APIPromise { - const { agent_id, ...query } = params; - return this._client.get(path`/v1/agents/${agent_id}/session/${sessionID}`, { query, ...options }); - } - - /** - * Delete an agent session by its ID. - */ - delete(sessionID: string, params: SessionDeleteParams, options?: RequestOptions): APIPromise { - const { agent_id } = params; - return this._client.delete(path`/v1/agents/${agent_id}/session/${sessionID}`, { - ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), - }); - } -} - -/** - * A single session of an interaction with an Agentic System. - */ -export interface Session { - session_id: string; - - session_name: string; - - started_at: string; - - turns: Array; -} - -export interface SessionCreateResponse { - session_id: string; -} - -export interface SessionCreateParams { - /** - * The name of the session to create. 
- */ - session_name: string; -} - -export interface SessionRetrieveParams { - /** - * Path param: The ID of the agent to get the session for. - */ - agent_id: string; - - /** - * Query param: (Optional) List of turn IDs to filter the session by. - */ - turn_ids?: Array; -} - -export interface SessionDeleteParams { - /** - * The ID of the agent to delete the session for. - */ - agent_id: string; -} - -SessionResource.TurnResource = TurnResource; - -export declare namespace SessionResource { - export { - type Session as Session, - type SessionCreateResponse as SessionCreateResponse, - type SessionCreateParams as SessionCreateParams, - type SessionRetrieveParams as SessionRetrieveParams, - type SessionDeleteParams as SessionDeleteParams, - }; - - export { - TurnResource as TurnResource, - type AgentTool as AgentTool, - type InferenceStep as InferenceStep, - type MemoryRetrievalStep as MemoryRetrievalStep, - type ShieldCallStep as ShieldCallStep, - type ToolExecutionStep as ToolExecutionStep, - type ToolResponse as ToolResponse, - type ToolResponseMessage as ToolResponseMessage, - type Turn as Turn, - type UserMessage as UserMessage, - type TurnCreateParams as TurnCreateParams, - type TurnRetrieveParams as TurnRetrieveParams, - type TurnResumeParams as TurnResumeParams, - }; -} diff --git a/src/resources/agents/session/turn.ts b/src/resources/agents/session/turn.ts deleted file mode 100644 index bca7843..0000000 --- a/src/resources/agents/session/turn.ts +++ /dev/null @@ -1,3 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export * from './turn/index'; diff --git a/src/resources/agents/session/turn/index.ts b/src/resources/agents/session/turn/index.ts deleted file mode 100644 index feea447..0000000 --- a/src/resources/agents/session/turn/index.ts +++ /dev/null @@ -1,18 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -export { Step, type StepRetrieveResponse, type StepRetrieveParams } from './step'; -export { - TurnResource, - type AgentTool, - type InferenceStep, - type MemoryRetrievalStep, - type ShieldCallStep, - type ToolExecutionStep, - type ToolResponse, - type ToolResponseMessage, - type Turn, - type UserMessage, - type TurnCreateParams, - type TurnRetrieveParams, - type TurnResumeParams, -} from './turn'; diff --git a/src/resources/agents/session/turn/step.ts b/src/resources/agents/session/turn/step.ts deleted file mode 100644 index 556b930..0000000 --- a/src/resources/agents/session/turn/step.ts +++ /dev/null @@ -1,56 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../../../../core/resource'; -import * as TurnAPI from './turn'; -import { APIPromise } from '../../../../core/api-promise'; -import { RequestOptions } from '../../../../internal/request-options'; -import { path } from '../../../../internal/utils/path'; - -export class Step extends APIResource { - /** - * Retrieve an agent step by its ID. - */ - retrieve( - stepID: string, - params: StepRetrieveParams, - options?: RequestOptions, - ): APIPromise { - const { agent_id, session_id, turn_id } = params; - return this._client.get( - path`/v1/agents/${agent_id}/session/${session_id}/turn/${turn_id}/step/${stepID}`, - options, - ); - } -} - -export interface StepRetrieveResponse { - /** - * An inference step in an agent turn. - */ - step: - | TurnAPI.InferenceStep - | TurnAPI.ToolExecutionStep - | TurnAPI.ShieldCallStep - | TurnAPI.MemoryRetrievalStep; -} - -export interface StepRetrieveParams { - /** - * The ID of the agent to get the step for. - */ - agent_id: string; - - /** - * The ID of the session to get the step for. - */ - session_id: string; - - /** - * The ID of the turn to get the step for. 
- */ - turn_id: string; -} - -export declare namespace Step { - export { type StepRetrieveResponse as StepRetrieveResponse, type StepRetrieveParams as StepRetrieveParams }; -} diff --git a/src/resources/agents/session/turn/turn.ts b/src/resources/agents/session/turn/turn.ts deleted file mode 100644 index a354a4f..0000000 --- a/src/resources/agents/session/turn/turn.ts +++ /dev/null @@ -1,521 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../../../../core/resource'; -import * as InferenceAPI from '../../../inference'; -import * as SafetyAPI from '../../../safety'; -import * as ToolRuntimeAPI from '../../../tool-runtime/tool-runtime'; -import * as StepAPI from './step'; -import { Step, StepRetrieveParams, StepRetrieveResponse } from './step'; -import { APIPromise } from '../../../../core/api-promise'; -import { RequestOptions } from '../../../../internal/request-options'; -import { path } from '../../../../internal/utils/path'; - -export class TurnResource extends APIResource { - step: StepAPI.Step = new StepAPI.Step(this._client); - - /** - * Create a new turn for an agent. - */ - create(sessionID: string, params: TurnCreateParams, options?: RequestOptions): APIPromise { - const { agent_id, ...body } = params; - return this._client.post(path`/v1/agents/${agent_id}/session/${sessionID}/turn`, { body, ...options }); - } - - /** - * Retrieve an agent turn by its ID. - */ - retrieve(turnID: string, params: TurnRetrieveParams, options?: RequestOptions): APIPromise { - const { agent_id, session_id } = params; - return this._client.get(path`/v1/agents/${agent_id}/session/${session_id}/turn/${turnID}`, options); - } - - /** - * Resume an agent turn with executed tool call responses. When a Turn has the - * status `awaiting_input` due to pending input from client side tool calls, this - * endpoint can be used to submit the outputs from the tool calls once they are - * ready. 
- */ - resume(turnID: string, params: TurnResumeParams, options?: RequestOptions): APIPromise { - const { agent_id, session_id, ...body } = params; - return this._client.post(path`/v1/agents/${agent_id}/session/${session_id}/turn/${turnID}/resume`, { - body, - ...options, - }); - } -} - -export type AgentTool = string | AgentTool.AgentToolGroupWithArgs; - -export namespace AgentTool { - export interface AgentToolGroupWithArgs { - args: { [key: string]: boolean | number | string | Array | unknown | null }; - - name: string; - } -} - -/** - * An inference step in an agent turn. - */ -export interface InferenceStep { - /** - * The response from the LLM. - */ - model_response: InferenceAPI.CompletionMessage; - - /** - * The ID of the step. - */ - step_id: string; - - /** - * Type of the step in an agent turn. - */ - step_type: 'inference'; - - /** - * The ID of the turn. - */ - turn_id: string; - - /** - * The time the step completed. - */ - completed_at?: string; - - /** - * The time the step started. - */ - started_at?: string; -} - -/** - * A memory retrieval step in an agent turn. - */ -export interface MemoryRetrievalStep { - /** - * The context retrieved from the vector databases. - */ - inserted_context: InferenceAPI.InterleavedContent; - - /** - * The ID of the step. - */ - step_id: string; - - /** - * Type of the step in an agent turn. - */ - step_type: 'memory_retrieval'; - - /** - * The ID of the turn. - */ - turn_id: string; - - /** - * The IDs of the vector databases to retrieve context from. - */ - vector_db_ids: string; - - /** - * The time the step completed. - */ - completed_at?: string; - - /** - * The time the step started. - */ - started_at?: string; -} - -/** - * A shield call step in an agent turn. - */ -export interface ShieldCallStep { - /** - * The ID of the step. - */ - step_id: string; - - /** - * Type of the step in an agent turn. - */ - step_type: 'shield_call'; - - /** - * The ID of the turn. 
- */ - turn_id: string; - - /** - * The time the step completed. - */ - completed_at?: string; - - /** - * The time the step started. - */ - started_at?: string; - - /** - * The violation from the shield call. - */ - violation?: SafetyAPI.SafetyViolation; -} - -/** - * A tool execution step in an agent turn. - */ -export interface ToolExecutionStep { - /** - * The ID of the step. - */ - step_id: string; - - /** - * Type of the step in an agent turn. - */ - step_type: 'tool_execution'; - - /** - * The tool calls to execute. - */ - tool_calls: Array; - - /** - * The tool responses from the tool calls. - */ - tool_responses: Array; - - /** - * The ID of the turn. - */ - turn_id: string; - - /** - * The time the step completed. - */ - completed_at?: string; - - /** - * The time the step started. - */ - started_at?: string; -} - -export interface ToolResponse { - call_id: string; - - /** - * A image content item - */ - content: InferenceAPI.InterleavedContent; - - tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); - - metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; -} - -/** - * A message representing the result of a tool invocation. - */ -export interface ToolResponseMessage { - /** - * Unique identifier for the tool call this response is for - */ - call_id: string; - - /** - * The response content from the tool - */ - content: InferenceAPI.InterleavedContent; - - /** - * Must be "tool" to identify this as a tool response - */ - role: 'tool'; -} - -/** - * A single turn in an interaction with an Agentic System. - */ -export interface Turn { - input_messages: Array; - - /** - * A message containing the model's (assistant) response in a chat conversation. 
- */ - output_message: InferenceAPI.CompletionMessage; - - session_id: string; - - started_at: string; - - steps: Array; - - turn_id: string; - - completed_at?: string; - - output_attachments?: Array; -} - -export namespace Turn { - /** - * An attachment to an agent turn. - */ - export interface OutputAttachment { - /** - * The content of the attachment. - */ - content: - | string - | OutputAttachment.ImageContentItem - | OutputAttachment.TextContentItem - | Array - | ToolRuntimeAPI.URL; - - /** - * The MIME type of the attachment. - */ - mime_type: string; - } - - export namespace OutputAttachment { - /** - * A image content item - */ - export interface ImageContentItem { - /** - * Image as a base64 encoded string or an URL - */ - image: ImageContentItem.Image; - - /** - * Discriminator type of the content item. Always "image" - */ - type: 'image'; - } - - export namespace ImageContentItem { - /** - * Image as a base64 encoded string or an URL - */ - export interface Image { - /** - * base64 encoded image data as string - */ - data?: string; - - /** - * A URL of the image or data URL in the format of data:image/{type};base64,{data}. - * Note that URL could have length limits. - */ - url?: ToolRuntimeAPI.URL; - } - } - - /** - * A text content item - */ - export interface TextContentItem { - /** - * Text content - */ - text: string; - - /** - * Discriminator type of the content item. Always "text" - */ - type: 'text'; - } - } -} - -/** - * A message from the user in a chat conversation. - */ -export interface UserMessage { - /** - * The content of the message, which can include text and other media - */ - content: InferenceAPI.InterleavedContent; - - /** - * Must be "user" to identify this as a user message - */ - role: 'user'; - - /** - * (Optional) This field is used internally by Llama Stack to pass RAG context. - * This field may be removed in the API in the future. 
- */ - context?: InferenceAPI.InterleavedContent; -} - -export interface TurnCreateParams { - /** - * Path param: The ID of the agent to create the turn for. - */ - agent_id: string; - - /** - * Body param: List of messages to start the turn with. - */ - messages: Array; - - /** - * Body param: (Optional) List of documents to create the turn with. - */ - documents?: Array; - - /** - * Body param: (Optional) If True, generate an SSE event stream of the response. - * Defaults to False. - */ - stream?: boolean; - - /** - * Body param: (Optional) The tool configuration to create the turn with, will be - * used to override the agent's tool_config. - */ - tool_config?: InferenceAPI.ToolConfig; - - /** - * Body param: (Optional) List of toolgroups to create the turn with, will be used - * in addition to the agent's config toolgroups for the request. - */ - toolgroups?: Array; -} - -export namespace TurnCreateParams { - /** - * A document to be used by an agent. - */ - export interface Document { - /** - * The content of the document. - */ - content: - | string - | Document.ImageContentItem - | Document.TextContentItem - | Array - | ToolRuntimeAPI.URL; - - /** - * The MIME type of the document. - */ - mime_type: string; - } - - export namespace Document { - /** - * A image content item - */ - export interface ImageContentItem { - /** - * Image as a base64 encoded string or an URL - */ - image: ImageContentItem.Image; - - /** - * Discriminator type of the content item. Always "image" - */ - type: 'image'; - } - - export namespace ImageContentItem { - /** - * Image as a base64 encoded string or an URL - */ - export interface Image { - /** - * base64 encoded image data as string - */ - data?: string; - - /** - * A URL of the image or data URL in the format of data:image/{type};base64,{data}. - * Note that URL could have length limits. 
- */ - url?: ToolRuntimeAPI.URL; - } - } - - /** - * A text content item - */ - export interface TextContentItem { - /** - * Text content - */ - text: string; - - /** - * Discriminator type of the content item. Always "text" - */ - type: 'text'; - } - } -} - -export interface TurnRetrieveParams { - /** - * The ID of the agent to get the turn for. - */ - agent_id: string; - - /** - * The ID of the session to get the turn for. - */ - session_id: string; -} - -export interface TurnResumeParams { - /** - * Path param: The ID of the agent to resume. - */ - agent_id: string; - - /** - * Path param: The ID of the session to resume. - */ - session_id: string; - - /** - * Body param: The tool call responses to resume the turn with. - */ - tool_responses: Array; - - /** - * Body param: Whether to stream the response. - */ - stream?: boolean; -} - -TurnResource.Step = Step; - -export declare namespace TurnResource { - export { - type AgentTool as AgentTool, - type InferenceStep as InferenceStep, - type MemoryRetrievalStep as MemoryRetrievalStep, - type ShieldCallStep as ShieldCallStep, - type ToolExecutionStep as ToolExecutionStep, - type ToolResponse as ToolResponse, - type ToolResponseMessage as ToolResponseMessage, - type Turn as Turn, - type UserMessage as UserMessage, - type TurnCreateParams as TurnCreateParams, - type TurnRetrieveParams as TurnRetrieveParams, - type TurnResumeParams as TurnResumeParams, - }; - - export { - Step as Step, - type StepRetrieveResponse as StepRetrieveResponse, - type StepRetrieveParams as StepRetrieveParams, - }; -} diff --git a/src/resources/agents/steps.ts b/src/resources/agents/steps.ts new file mode 100644 index 0000000..1abf04b --- /dev/null +++ b/src/resources/agents/steps.ts @@ -0,0 +1,38 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../resource'; +import * as Core from '../../core'; +import * as AgentsAPI from './agents'; + +export class Steps extends APIResource { + /** + * Retrieve an agent step by its ID. + */ + retrieve( + agentId: string, + sessionId: string, + turnId: string, + stepId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get( + `/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/step/${stepId}`, + options, + ); + } +} + +export interface StepRetrieveResponse { + /** + * An inference step in an agent turn. + */ + step: + | AgentsAPI.InferenceStep + | AgentsAPI.ToolExecutionStep + | AgentsAPI.ShieldCallStep + | AgentsAPI.MemoryRetrievalStep; +} + +export declare namespace Steps { + export { type StepRetrieveResponse as StepRetrieveResponse }; +} diff --git a/src/resources/agents/turn.ts b/src/resources/agents/turn.ts new file mode 100644 index 0000000..8263743 --- /dev/null +++ b/src/resources/agents/turn.ts @@ -0,0 +1,527 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { APIPromise } from '../../core'; +import * as Core from '../../core'; +import * as TurnAPI from './turn'; +import * as Shared from '../shared'; +import * as AgentsAPI from './agents'; +import { Stream } from '../../streaming'; + +export class TurnResource extends APIResource { + /** + * Create a new turn for an agent. 
+ */ + create( + agentId: string, + sessionId: string, + body: TurnCreateParamsNonStreaming, + options?: Core.RequestOptions, + ): APIPromise; + create( + agentId: string, + sessionId: string, + body: TurnCreateParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + create( + agentId: string, + sessionId: string, + body: TurnCreateParamsBase, + options?: Core.RequestOptions, + ): APIPromise | Turn>; + create( + agentId: string, + sessionId: string, + body: TurnCreateParams, + options?: Core.RequestOptions, + ): APIPromise | APIPromise> { + return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn`, { + body, + ...options, + stream: body.stream ?? false, + }) as APIPromise | APIPromise>; + } + + /** + * Retrieve an agent turn by its ID. + */ + retrieve( + agentId: string, + sessionId: string, + turnId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}`, options); + } + + /** + * Resume an agent turn with executed tool call responses. When a Turn has the + * status `awaiting_input` due to pending input from client side tool calls, this + * endpoint can be used to submit the outputs from the tool calls once they are + * ready. 
+ */ + resume( + agentId: string, + sessionId: string, + turnId: string, + body: TurnResumeParamsNonStreaming, + options?: Core.RequestOptions, + ): APIPromise; + resume( + agentId: string, + sessionId: string, + turnId: string, + body: TurnResumeParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + resume( + agentId: string, + sessionId: string, + turnId: string, + body: TurnResumeParamsBase, + options?: Core.RequestOptions, + ): APIPromise | Turn>; + resume( + agentId: string, + sessionId: string, + turnId: string, + body: TurnResumeParams, + options?: Core.RequestOptions, + ): APIPromise | APIPromise> { + return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/resume`, { + body, + ...options, + stream: body.stream ?? false, + }) as APIPromise | APIPromise>; + } +} + +/** + * streamed agent turn completion response. + */ +export interface AgentTurnResponseStreamChunk { + event: TurnResponseEvent; +} + +/** + * A single turn in an interaction with an Agentic System. + */ +export interface Turn { + input_messages: Array; + + /** + * A message containing the model's (assistant) response in a chat conversation. + */ + output_message: Shared.CompletionMessage; + + session_id: string; + + started_at: string; + + steps: Array< + | AgentsAPI.InferenceStep + | AgentsAPI.ToolExecutionStep + | AgentsAPI.ShieldCallStep + | AgentsAPI.MemoryRetrievalStep + >; + + turn_id: string; + + completed_at?: string; + + output_attachments?: Array; +} + +export namespace Turn { + /** + * An attachment to an agent turn. + */ + export interface OutputAttachment { + /** + * The content of the attachment. + */ + content: + | string + | OutputAttachment.ImageContentItem + | OutputAttachment.TextContentItem + | Array + | OutputAttachment.URL; + + /** + * The MIME type of the attachment. 
+ */ + mime_type: string; + } + + export namespace OutputAttachment { + /** + * A image content item + */ + export interface ImageContentItem { + /** + * Image as a base64 encoded string or an URL + */ + image: ImageContentItem.Image; + + /** + * Discriminator type of the content item. Always "image" + */ + type: 'image'; + } + + export namespace ImageContentItem { + /** + * Image as a base64 encoded string or an URL + */ + export interface Image { + /** + * base64 encoded image data as string + */ + data?: string; + + /** + * A URL of the image or data URL in the format of data:image/{type};base64,{data}. + * Note that URL could have length limits. + */ + url?: Image.URL; + } + + export namespace Image { + /** + * A URL of the image or data URL in the format of data:image/{type};base64,{data}. + * Note that URL could have length limits. + */ + export interface URL { + uri: string; + } + } + } + + /** + * A text content item + */ + export interface TextContentItem { + /** + * Text content + */ + text: string; + + /** + * Discriminator type of the content item. Always "text" + */ + type: 'text'; + } + + export interface URL { + uri: string; + } + } +} + +export interface TurnResponseEvent { + payload: TurnResponseEventPayload; +} + +export type TurnResponseEventPayload = + | TurnResponseEventPayload.AgentTurnResponseStepStartPayload + | TurnResponseEventPayload.AgentTurnResponseStepProgressPayload + | TurnResponseEventPayload.AgentTurnResponseStepCompletePayload + | TurnResponseEventPayload.AgentTurnResponseTurnStartPayload + | TurnResponseEventPayload.AgentTurnResponseTurnCompletePayload + | TurnResponseEventPayload.AgentTurnResponseTurnAwaitingInputPayload; + +export namespace TurnResponseEventPayload { + export interface AgentTurnResponseStepStartPayload { + event_type: 'step_start'; + + step_id: string; + + /** + * Type of the step in an agent turn. 
+ */ + step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval'; + + metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; + } + + export interface AgentTurnResponseStepProgressPayload { + delta: Shared.ContentDelta; + + event_type: 'step_progress'; + + step_id: string; + + /** + * Type of the step in an agent turn. + */ + step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval'; + } + + export interface AgentTurnResponseStepCompletePayload { + event_type: 'step_complete'; + + /** + * An inference step in an agent turn. + */ + step_details: + | AgentsAPI.InferenceStep + | AgentsAPI.ToolExecutionStep + | AgentsAPI.ShieldCallStep + | AgentsAPI.MemoryRetrievalStep; + + step_id: string; + + /** + * Type of the step in an agent turn. + */ + step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval'; + } + + export interface AgentTurnResponseTurnStartPayload { + event_type: 'turn_start'; + + turn_id: string; + } + + export interface AgentTurnResponseTurnCompletePayload { + event_type: 'turn_complete'; + + /** + * A single turn in an interaction with an Agentic System. + */ + turn: TurnAPI.Turn; + } + + export interface AgentTurnResponseTurnAwaitingInputPayload { + event_type: 'turn_awaiting_input'; + + /** + * A single turn in an interaction with an Agentic System. + */ + turn: TurnAPI.Turn; + } +} + +export type TurnCreateParams = TurnCreateParamsNonStreaming | TurnCreateParamsStreaming; + +export interface TurnCreateParamsBase { + /** + * List of messages to start the turn with. + */ + messages: Array; + + /** + * (Optional) List of documents to create the turn with. + */ + documents?: Array; + + /** + * (Optional) If True, generate an SSE event stream of the response. Defaults to + * False. + */ + stream?: boolean; + + /** + * (Optional) The tool configuration to create the turn with, will be used to + * override the agent's tool_config. 
+ */ + tool_config?: TurnCreateParams.ToolConfig; + + /** + * (Optional) List of toolgroups to create the turn with, will be used in addition + * to the agent's config toolgroups for the request. + */ + toolgroups?: Array; +} + +export namespace TurnCreateParams { + /** + * A document to be used by an agent. + */ + export interface Document { + /** + * The content of the document. + */ + content: + | string + | Document.ImageContentItem + | Document.TextContentItem + | Array + | Document.URL; + + /** + * The MIME type of the document. + */ + mime_type: string; + } + + export namespace Document { + /** + * A image content item + */ + export interface ImageContentItem { + /** + * Image as a base64 encoded string or an URL + */ + image: ImageContentItem.Image; + + /** + * Discriminator type of the content item. Always "image" + */ + type: 'image'; + } + + export namespace ImageContentItem { + /** + * Image as a base64 encoded string or an URL + */ + export interface Image { + /** + * base64 encoded image data as string + */ + data?: string; + + /** + * A URL of the image or data URL in the format of data:image/{type};base64,{data}. + * Note that URL could have length limits. + */ + url?: Image.URL; + } + + export namespace Image { + /** + * A URL of the image or data URL in the format of data:image/{type};base64,{data}. + * Note that URL could have length limits. + */ + export interface URL { + uri: string; + } + } + } + + /** + * A text content item + */ + export interface TextContentItem { + /** + * Text content + */ + text: string; + + /** + * Discriminator type of the content item. Always "text" + */ + type: 'text'; + } + + export interface URL { + uri: string; + } + } + + /** + * (Optional) The tool configuration to create the turn with, will be used to + * override the agent's tool_config. + */ + export interface ToolConfig { + /** + * (Optional) Config for how to override the default system prompt. 
- + * `SystemMessageBehavior.append`: Appends the provided system message to the + * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default + * system prompt with the provided system message. The system message can include + * the string '{{function_definitions}}' to indicate where the function definitions + * should be inserted. + */ + system_message_behavior?: 'append' | 'replace'; + + /** + * (Optional) Whether tool use is automatic, required, or none. Can also specify a + * tool name to use a specific tool. Defaults to ToolChoice.auto. + */ + tool_choice?: 'auto' | 'required' | 'none' | (string & {}); + + /** + * (Optional) Instructs the model how to format tool calls. By default, Llama Stack + * will attempt to use a format that is best adapted to the model. - + * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - + * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a + * tag. - `ToolPromptFormat.python_list`: The tool calls + * are output as Python syntax -- a list of function calls. + */ + tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; + } + + export interface AgentToolGroupWithArgs { + args: { [key: string]: boolean | number | string | Array | unknown | null }; + + name: string; + } + + export type TurnCreateParamsNonStreaming = TurnAPI.TurnCreateParamsNonStreaming; + export type TurnCreateParamsStreaming = TurnAPI.TurnCreateParamsStreaming; +} + +export interface TurnCreateParamsNonStreaming extends TurnCreateParamsBase { + /** + * (Optional) If True, generate an SSE event stream of the response. Defaults to + * False. + */ + stream?: false; +} + +export interface TurnCreateParamsStreaming extends TurnCreateParamsBase { + /** + * (Optional) If True, generate an SSE event stream of the response. Defaults to + * False. 
+ */ + stream: true; +} + +export type TurnResumeParams = TurnResumeParamsNonStreaming | TurnResumeParamsStreaming; + +export interface TurnResumeParamsBase { + /** + * The tool call responses to resume the turn with. + */ + tool_responses: Array; + + /** + * Whether to stream the response. + */ + stream?: boolean; +} + +export namespace TurnResumeParams { + export type TurnResumeParamsNonStreaming = TurnAPI.TurnResumeParamsNonStreaming; + export type TurnResumeParamsStreaming = TurnAPI.TurnResumeParamsStreaming; +} + +export interface TurnResumeParamsNonStreaming extends TurnResumeParamsBase { + /** + * Whether to stream the response. + */ + stream?: false; +} + +export interface TurnResumeParamsStreaming extends TurnResumeParamsBase { + /** + * Whether to stream the response. + */ + stream: true; +} + +export declare namespace TurnResource { + export { + type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk, + type Turn as Turn, + type TurnResponseEvent as TurnResponseEvent, + type TurnResponseEventPayload as TurnResponseEventPayload, + type TurnCreateParams as TurnCreateParams, + type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming, + type TurnCreateParamsStreaming as TurnCreateParamsStreaming, + type TurnResumeParams as TurnResumeParams, + type TurnResumeParamsNonStreaming as TurnResumeParamsNonStreaming, + type TurnResumeParamsStreaming as TurnResumeParamsStreaming, + }; +} diff --git a/src/resources/benchmarks.ts b/src/resources/benchmarks.ts new file mode 100644 index 0000000..a95494f --- /dev/null +++ b/src/resources/benchmarks.ts @@ -0,0 +1,96 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../resource'; +import * as Core from '../core'; + +export class Benchmarks extends APIResource { + /** + * Get a benchmark by its ID. 
+ */ + retrieve(benchmarkId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/eval/benchmarks/${benchmarkId}`, options); + } + + /** + * List all benchmarks. + */ + list(options?: Core.RequestOptions): Core.APIPromise { + return ( + this._client.get('/v1/eval/benchmarks', options) as Core.APIPromise<{ data: BenchmarkListResponse }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Register a benchmark. + */ + register(body: BenchmarkRegisterParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/v1/eval/benchmarks', { + body, + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); + } +} + +export interface Benchmark { + dataset_id: string; + + identifier: string; + + metadata: { [key: string]: boolean | number | string | Array | unknown | null }; + + provider_id: string; + + scoring_functions: Array; + + type: 'benchmark'; + + provider_resource_id?: string; +} + +export interface ListBenchmarksResponse { + data: BenchmarkListResponse; +} + +export type BenchmarkListResponse = Array; + +export interface BenchmarkRegisterParams { + /** + * The ID of the benchmark to register. + */ + benchmark_id: string; + + /** + * The ID of the dataset to use for the benchmark. + */ + dataset_id: string; + + /** + * The scoring functions to use for the benchmark. + */ + scoring_functions: Array; + + /** + * The metadata to use for the benchmark. + */ + metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * The ID of the provider benchmark to use for the benchmark. + */ + provider_benchmark_id?: string; + + /** + * The ID of the provider to use for the benchmark. 
+ */ + provider_id?: string; +} + +export declare namespace Benchmarks { + export { + type Benchmark as Benchmark, + type ListBenchmarksResponse as ListBenchmarksResponse, + type BenchmarkListResponse as BenchmarkListResponse, + type BenchmarkRegisterParams as BenchmarkRegisterParams, + }; +} diff --git a/src/resources/openai/v1.ts b/src/resources/chat.ts similarity index 74% rename from src/resources/openai/v1.ts rename to src/resources/chat.ts index d02995c..b3dd87a 100644 --- a/src/resources/openai/v1.ts +++ b/src/resources/chat.ts @@ -1,3 +1,3 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export * from './v1/index'; +export * from './chat/index'; diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts new file mode 100644 index 0000000..3c693ee --- /dev/null +++ b/src/resources/chat/chat.ts @@ -0,0 +1,212 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as CompletionsAPI from './completions'; +import { + CompletionCreateParams, + CompletionCreateParamsNonStreaming, + CompletionCreateParamsStreaming, + CompletionCreateResponse, + CompletionListParams, + CompletionListResponse, + CompletionRetrieveResponse, + Completions, +} from './completions'; + +export class Chat extends APIResource { + completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); +} + +/** + * Chunk from a streaming response to an OpenAI-compatible chat completion request. 
+ */ +export interface ChatCompletionChunk { + /** + * The ID of the chat completion + */ + id: string; + + /** + * List of choices + */ + choices: Array; + + /** + * The Unix timestamp in seconds when the chat completion was created + */ + created: number; + + /** + * The model that was used to generate the chat completion + */ + model: string; + + /** + * The object type, which will be "chat.completion.chunk" + */ + object: 'chat.completion.chunk'; +} + +export namespace ChatCompletionChunk { + /** + * A chunk choice from an OpenAI-compatible chat completion streaming response. + */ + export interface Choice { + /** + * The delta from the chunk + */ + delta: Choice.Delta; + + /** + * The reason the model stopped generating + */ + finish_reason: string; + + /** + * The index of the choice + */ + index: number; + + /** + * (Optional) The log probabilities for the tokens in the message + */ + logprobs?: Choice.Logprobs; + } + + export namespace Choice { + /** + * The delta from the chunk + */ + export interface Delta { + /** + * (Optional) The content of the delta + */ + content?: string; + + /** + * (Optional) The refusal of the delta + */ + refusal?: string; + + /** + * (Optional) The role of the delta + */ + role?: string; + + /** + * (Optional) The tool calls of the delta + */ + tool_calls?: Array; + } + + export namespace Delta { + export interface ToolCall { + type: 'function'; + + id?: string; + + function?: ToolCall.Function; + + index?: number; + } + + export namespace ToolCall { + export interface Function { + arguments?: string; + + name?: string; + } + } + } + + /** + * (Optional) The log probabilities for the tokens in the message + */ + export interface Logprobs { + /** + * (Optional) The log probabilities for the tokens in the message + */ + content?: Array; + + /** + * (Optional) The log probabilities for the tokens in the message + */ + refusal?: Array; + } + + export namespace Logprobs { + /** + * The log probability for a token from an 
OpenAI-compatible chat completion + * response. + */ + export interface Content { + token: string; + + logprob: number; + + top_logprobs: Array; + + bytes?: Array; + } + + export namespace Content { + /** + * The top log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface TopLogprob { + token: string; + + logprob: number; + + bytes?: Array; + } + } + + /** + * The log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface Refusal { + token: string; + + logprob: number; + + top_logprobs: Array; + + bytes?: Array; + } + + export namespace Refusal { + /** + * The top log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface TopLogprob { + token: string; + + logprob: number; + + bytes?: Array; + } + } + } + } +} + +Chat.Completions = Completions; + +export declare namespace Chat { + export { type ChatCompletionChunk as ChatCompletionChunk }; + + export { + Completions as Completions, + type CompletionCreateResponse as CompletionCreateResponse, + type CompletionRetrieveResponse as CompletionRetrieveResponse, + type CompletionListResponse as CompletionListResponse, + type CompletionCreateParams as CompletionCreateParams, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + type CompletionListParams as CompletionListParams, + }; +} diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts new file mode 100644 index 0000000..d103bb7 --- /dev/null +++ b/src/resources/chat/completions.ts @@ -0,0 +1,2284 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import { APIPromise } from '../../core'; +import * as Core from '../../core'; +import * as CompletionsAPI from './completions'; +import * as ChatAPI from './chat'; +import { Stream } from '../../streaming'; + +export class Completions extends APIResource { + /** + * Generate an OpenAI-compatible chat completion for the given messages using the + * specified model. + */ + create( + body: CompletionCreateParamsNonStreaming, + options?: Core.RequestOptions, + ): APIPromise; + create( + body: CompletionCreateParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + create( + body: CompletionCreateParamsBase, + options?: Core.RequestOptions, + ): APIPromise | CompletionCreateResponse>; + create( + body: CompletionCreateParams, + options?: Core.RequestOptions, + ): APIPromise | APIPromise> { + return this._client.post('/v1/openai/v1/chat/completions', { + body, + ...options, + stream: body.stream ?? false, + }) as APIPromise | APIPromise>; + } + + /** + * Describe a chat completion by its ID. + */ + retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/openai/v1/chat/completions/${completionId}`, options); + } + + /** + * List all chat completions. + */ + list(query?: CompletionListParams, options?: Core.RequestOptions): Core.APIPromise; + list(options?: Core.RequestOptions): Core.APIPromise; + list( + query: CompletionListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return this._client.get('/v1/openai/v1/chat/completions', { query, ...options }); + } +} + +/** + * Response from an OpenAI-compatible chat completion request. 
+ */ +export type CompletionCreateResponse = + | CompletionCreateResponse.OpenAIChatCompletion + | ChatAPI.ChatCompletionChunk; + +export namespace CompletionCreateResponse { + /** + * Response from an OpenAI-compatible chat completion request. + */ + export interface OpenAIChatCompletion { + /** + * The ID of the chat completion + */ + id: string; + + /** + * List of choices + */ + choices: Array; + + /** + * The Unix timestamp in seconds when the chat completion was created + */ + created: number; + + /** + * The model that was used to generate the chat completion + */ + model: string; + + /** + * The object type, which will be "chat.completion" + */ + object: 'chat.completion'; + } + + export namespace OpenAIChatCompletion { + /** + * A choice from an OpenAI-compatible chat completion response. + */ + export interface Choice { + /** + * The reason the model stopped generating + */ + finish_reason: string; + + /** + * The index of the choice + */ + index: number; + + /** + * The message from the model + */ + message: + | Choice.OpenAIUserMessageParam + | Choice.OpenAISystemMessageParam + | Choice.OpenAIAssistantMessageParam + | Choice.OpenAIToolMessageParam + | Choice.OpenAIDeveloperMessageParam; + + /** + * (Optional) The log probabilities for the tokens in the message + */ + logprobs?: Choice.Logprobs; + } + + export namespace Choice { + /** + * A message from the user in an OpenAI-compatible chat completion request. + */ + export interface OpenAIUserMessageParam { + /** + * The content of the message, which can include text and other media + */ + content: + | string + | Array< + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "user" to identify this as a user message + */ + role: 'user'; + + /** + * (Optional) The name of the user message participant. 
+ */ + name?: string; + } + + export namespace OpenAIUserMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A system message providing instructions or context to the model. + */ + export interface OpenAISystemMessageParam { + /** + * The content of the "system prompt". If multiple system messages are provided, + * they are concatenated. The underlying Llama Stack code may also add other system + * messages (for example, for formatting tool definitions). + */ + content: + | string + | Array< + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "system" to identify this as a system message + */ + role: 'system'; + + /** + * (Optional) The name of the system message participant. + */ + name?: string; + } + + export namespace OpenAISystemMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message containing the model's (assistant) response in an OpenAI-compatible + * chat completion request. 
+ */ + export interface OpenAIAssistantMessageParam { + /** + * Must be "assistant" to identify this as the model's response + */ + role: 'assistant'; + + /** + * The content of the model's response + */ + content?: + | string + | Array< + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * (Optional) The name of the assistant message participant. + */ + name?: string; + + /** + * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object. + */ + tool_calls?: Array; + } + + export namespace OpenAIAssistantMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + + export interface ToolCall { + type: 'function'; + + id?: string; + + function?: ToolCall.Function; + + index?: number; + } + + export namespace ToolCall { + export interface Function { + arguments?: string; + + name?: string; + } + } + } + + /** + * A message representing the result of a tool invocation in an OpenAI-compatible + * chat completion request. 
+ */ + export interface OpenAIToolMessageParam { + /** + * The response content from the tool + */ + content: + | string + | Array< + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "tool" to identify this as a tool response + */ + role: 'tool'; + + /** + * Unique identifier for the tool call this response is for + */ + tool_call_id: string; + } + + export namespace OpenAIToolMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message from the developer in an OpenAI-compatible chat completion request. + */ + export interface OpenAIDeveloperMessageParam { + /** + * The content of the developer message + */ + content: + | string + | Array< + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "developer" to identify this as a developer message + */ + role: 'developer'; + + /** + * (Optional) The name of the developer message participant. 
+ */ + name?: string; + } + + export namespace OpenAIDeveloperMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * (Optional) The log probabilities for the tokens in the message + */ + export interface Logprobs { + /** + * (Optional) The log probabilities for the tokens in the message + */ + content?: Array; + + /** + * (Optional) The log probabilities for the tokens in the message + */ + refusal?: Array; + } + + export namespace Logprobs { + /** + * The log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface Content { + token: string; + + logprob: number; + + top_logprobs: Array; + + bytes?: Array; + } + + export namespace Content { + /** + * The top log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface TopLogprob { + token: string; + + logprob: number; + + bytes?: Array; + } + } + + /** + * The log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface Refusal { + token: string; + + logprob: number; + + top_logprobs: Array; + + bytes?: Array; + } + + export namespace Refusal { + /** + * The top log probability for a token from an OpenAI-compatible chat completion + * response. 
+ */ + export interface TopLogprob { + token: string; + + logprob: number; + + bytes?: Array; + } + } + } + } + } +} + +export interface CompletionRetrieveResponse { + /** + * The ID of the chat completion + */ + id: string; + + /** + * List of choices + */ + choices: Array; + + /** + * The Unix timestamp in seconds when the chat completion was created + */ + created: number; + + input_messages: Array< + | CompletionRetrieveResponse.OpenAIUserMessageParam + | CompletionRetrieveResponse.OpenAISystemMessageParam + | CompletionRetrieveResponse.OpenAIAssistantMessageParam + | CompletionRetrieveResponse.OpenAIToolMessageParam + | CompletionRetrieveResponse.OpenAIDeveloperMessageParam + >; + + /** + * The model that was used to generate the chat completion + */ + model: string; + + /** + * The object type, which will be "chat.completion" + */ + object: 'chat.completion'; +} + +export namespace CompletionRetrieveResponse { + /** + * A choice from an OpenAI-compatible chat completion response. + */ + export interface Choice { + /** + * The reason the model stopped generating + */ + finish_reason: string; + + /** + * The index of the choice + */ + index: number; + + /** + * The message from the model + */ + message: + | Choice.OpenAIUserMessageParam + | Choice.OpenAISystemMessageParam + | Choice.OpenAIAssistantMessageParam + | Choice.OpenAIToolMessageParam + | Choice.OpenAIDeveloperMessageParam; + + /** + * (Optional) The log probabilities for the tokens in the message + */ + logprobs?: Choice.Logprobs; + } + + export namespace Choice { + /** + * A message from the user in an OpenAI-compatible chat completion request. 
+ */ + export interface OpenAIUserMessageParam { + /** + * The content of the message, which can include text and other media + */ + content: + | string + | Array< + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "user" to identify this as a user message + */ + role: 'user'; + + /** + * (Optional) The name of the user message participant. + */ + name?: string; + } + + export namespace OpenAIUserMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A system message providing instructions or context to the model. + */ + export interface OpenAISystemMessageParam { + /** + * The content of the "system prompt". If multiple system messages are provided, + * they are concatenated. The underlying Llama Stack code may also add other system + * messages (for example, for formatting tool definitions). + */ + content: + | string + | Array< + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "system" to identify this as a system message + */ + role: 'system'; + + /** + * (Optional) The name of the system message participant. 
+ */ + name?: string; + } + + export namespace OpenAISystemMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message containing the model's (assistant) response in an OpenAI-compatible + * chat completion request. + */ + export interface OpenAIAssistantMessageParam { + /** + * Must be "assistant" to identify this as the model's response + */ + role: 'assistant'; + + /** + * The content of the model's response + */ + content?: + | string + | Array< + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * (Optional) The name of the assistant message participant. + */ + name?: string; + + /** + * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object. 
+ */ + tool_calls?: Array; + } + + export namespace OpenAIAssistantMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + + export interface ToolCall { + type: 'function'; + + id?: string; + + function?: ToolCall.Function; + + index?: number; + } + + export namespace ToolCall { + export interface Function { + arguments?: string; + + name?: string; + } + } + } + + /** + * A message representing the result of a tool invocation in an OpenAI-compatible + * chat completion request. + */ + export interface OpenAIToolMessageParam { + /** + * The response content from the tool + */ + content: + | string + | Array< + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "tool" to identify this as a tool response + */ + role: 'tool'; + + /** + * Unique identifier for the tool call this response is for + */ + tool_call_id: string; + } + + export namespace OpenAIToolMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message from the developer in an OpenAI-compatible chat completion request. 
+ */ + export interface OpenAIDeveloperMessageParam { + /** + * The content of the developer message + */ + content: + | string + | Array< + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "developer" to identify this as a developer message + */ + role: 'developer'; + + /** + * (Optional) The name of the developer message participant. + */ + name?: string; + } + + export namespace OpenAIDeveloperMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * (Optional) The log probabilities for the tokens in the message + */ + export interface Logprobs { + /** + * (Optional) The log probabilities for the tokens in the message + */ + content?: Array; + + /** + * (Optional) The log probabilities for the tokens in the message + */ + refusal?: Array; + } + + export namespace Logprobs { + /** + * The log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface Content { + token: string; + + logprob: number; + + top_logprobs: Array; + + bytes?: Array; + } + + export namespace Content { + /** + * The top log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface TopLogprob { + token: string; + + logprob: number; + + bytes?: Array; + } + } + + /** + * The log probability for a token from an OpenAI-compatible chat completion + * response. 
+ */ + export interface Refusal { + token: string; + + logprob: number; + + top_logprobs: Array; + + bytes?: Array; + } + + export namespace Refusal { + /** + * The top log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface TopLogprob { + token: string; + + logprob: number; + + bytes?: Array; + } + } + } + } + + /** + * A message from the user in an OpenAI-compatible chat completion request. + */ + export interface OpenAIUserMessageParam { + /** + * The content of the message, which can include text and other media + */ + content: + | string + | Array< + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "user" to identify this as a user message + */ + role: 'user'; + + /** + * (Optional) The name of the user message participant. + */ + name?: string; + } + + export namespace OpenAIUserMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A system message providing instructions or context to the model. + */ + export interface OpenAISystemMessageParam { + /** + * The content of the "system prompt". If multiple system messages are provided, + * they are concatenated. The underlying Llama Stack code may also add other system + * messages (for example, for formatting tool definitions). 
+ */ + content: + | string + | Array< + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "system" to identify this as a system message + */ + role: 'system'; + + /** + * (Optional) The name of the system message participant. + */ + name?: string; + } + + export namespace OpenAISystemMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message containing the model's (assistant) response in an OpenAI-compatible + * chat completion request. + */ + export interface OpenAIAssistantMessageParam { + /** + * Must be "assistant" to identify this as the model's response + */ + role: 'assistant'; + + /** + * The content of the model's response + */ + content?: + | string + | Array< + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * (Optional) The name of the assistant message participant. + */ + name?: string; + + /** + * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object. 
+ */ + tool_calls?: Array; + } + + export namespace OpenAIAssistantMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + + export interface ToolCall { + type: 'function'; + + id?: string; + + function?: ToolCall.Function; + + index?: number; + } + + export namespace ToolCall { + export interface Function { + arguments?: string; + + name?: string; + } + } + } + + /** + * A message representing the result of a tool invocation in an OpenAI-compatible + * chat completion request. + */ + export interface OpenAIToolMessageParam { + /** + * The response content from the tool + */ + content: + | string + | Array< + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "tool" to identify this as a tool response + */ + role: 'tool'; + + /** + * Unique identifier for the tool call this response is for + */ + tool_call_id: string; + } + + export namespace OpenAIToolMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message from the developer in an OpenAI-compatible chat completion request. 
+ */ + export interface OpenAIDeveloperMessageParam { + /** + * The content of the developer message + */ + content: + | string + | Array< + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "developer" to identify this as a developer message + */ + role: 'developer'; + + /** + * (Optional) The name of the developer message participant. + */ + name?: string; + } + + export namespace OpenAIDeveloperMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } +} + +export interface CompletionListResponse { + data: Array; + + first_id: string; + + has_more: boolean; + + last_id: string; + + object: 'list'; +} + +export namespace CompletionListResponse { + export interface Data { + /** + * The ID of the chat completion + */ + id: string; + + /** + * List of choices + */ + choices: Array; + + /** + * The Unix timestamp in seconds when the chat completion was created + */ + created: number; + + input_messages: Array< + | Data.OpenAIUserMessageParam + | Data.OpenAISystemMessageParam + | Data.OpenAIAssistantMessageParam + | Data.OpenAIToolMessageParam + | Data.OpenAIDeveloperMessageParam + >; + + /** + * The model that was used to generate the chat completion + */ + model: string; + + /** + * The object type, which will be "chat.completion" + */ + object: 'chat.completion'; + } + + export namespace Data { + /** + * A choice from an OpenAI-compatible chat completion response. 
+ */ + export interface Choice { + /** + * The reason the model stopped generating + */ + finish_reason: string; + + /** + * The index of the choice + */ + index: number; + + /** + * The message from the model + */ + message: + | Choice.OpenAIUserMessageParam + | Choice.OpenAISystemMessageParam + | Choice.OpenAIAssistantMessageParam + | Choice.OpenAIToolMessageParam + | Choice.OpenAIDeveloperMessageParam; + + /** + * (Optional) The log probabilities for the tokens in the message + */ + logprobs?: Choice.Logprobs; + } + + export namespace Choice { + /** + * A message from the user in an OpenAI-compatible chat completion request. + */ + export interface OpenAIUserMessageParam { + /** + * The content of the message, which can include text and other media + */ + content: + | string + | Array< + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "user" to identify this as a user message + */ + role: 'user'; + + /** + * (Optional) The name of the user message participant. + */ + name?: string; + } + + export namespace OpenAIUserMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A system message providing instructions or context to the model. + */ + export interface OpenAISystemMessageParam { + /** + * The content of the "system prompt". If multiple system messages are provided, + * they are concatenated. The underlying Llama Stack code may also add other system + * messages (for example, for formatting tool definitions). 
+ */ + content: + | string + | Array< + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "system" to identify this as a system message + */ + role: 'system'; + + /** + * (Optional) The name of the system message participant. + */ + name?: string; + } + + export namespace OpenAISystemMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message containing the model's (assistant) response in an OpenAI-compatible + * chat completion request. + */ + export interface OpenAIAssistantMessageParam { + /** + * Must be "assistant" to identify this as the model's response + */ + role: 'assistant'; + + /** + * The content of the model's response + */ + content?: + | string + | Array< + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * (Optional) The name of the assistant message participant. + */ + name?: string; + + /** + * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object. 
+ */ + tool_calls?: Array; + } + + export namespace OpenAIAssistantMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + + export interface ToolCall { + type: 'function'; + + id?: string; + + function?: ToolCall.Function; + + index?: number; + } + + export namespace ToolCall { + export interface Function { + arguments?: string; + + name?: string; + } + } + } + + /** + * A message representing the result of a tool invocation in an OpenAI-compatible + * chat completion request. + */ + export interface OpenAIToolMessageParam { + /** + * The response content from the tool + */ + content: + | string + | Array< + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "tool" to identify this as a tool response + */ + role: 'tool'; + + /** + * Unique identifier for the tool call this response is for + */ + tool_call_id: string; + } + + export namespace OpenAIToolMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message from the developer in an OpenAI-compatible chat completion request. 
+ */ + export interface OpenAIDeveloperMessageParam { + /** + * The content of the developer message + */ + content: + | string + | Array< + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "developer" to identify this as a developer message + */ + role: 'developer'; + + /** + * (Optional) The name of the developer message participant. + */ + name?: string; + } + + export namespace OpenAIDeveloperMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * (Optional) The log probabilities for the tokens in the message + */ + export interface Logprobs { + /** + * (Optional) The log probabilities for the tokens in the message + */ + content?: Array; + + /** + * (Optional) The log probabilities for the tokens in the message + */ + refusal?: Array; + } + + export namespace Logprobs { + /** + * The log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface Content { + token: string; + + logprob: number; + + top_logprobs: Array; + + bytes?: Array; + } + + export namespace Content { + /** + * The top log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface TopLogprob { + token: string; + + logprob: number; + + bytes?: Array; + } + } + + /** + * The log probability for a token from an OpenAI-compatible chat completion + * response. 
+ */ + export interface Refusal { + token: string; + + logprob: number; + + top_logprobs: Array; + + bytes?: Array; + } + + export namespace Refusal { + /** + * The top log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface TopLogprob { + token: string; + + logprob: number; + + bytes?: Array; + } + } + } + } + + /** + * A message from the user in an OpenAI-compatible chat completion request. + */ + export interface OpenAIUserMessageParam { + /** + * The content of the message, which can include text and other media + */ + content: + | string + | Array< + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "user" to identify this as a user message + */ + role: 'user'; + + /** + * (Optional) The name of the user message participant. + */ + name?: string; + } + + export namespace OpenAIUserMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A system message providing instructions or context to the model. + */ + export interface OpenAISystemMessageParam { + /** + * The content of the "system prompt". If multiple system messages are provided, + * they are concatenated. The underlying Llama Stack code may also add other system + * messages (for example, for formatting tool definitions). 
+ */ + content: + | string + | Array< + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "system" to identify this as a system message + */ + role: 'system'; + + /** + * (Optional) The name of the system message participant. + */ + name?: string; + } + + export namespace OpenAISystemMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message containing the model's (assistant) response in an OpenAI-compatible + * chat completion request. + */ + export interface OpenAIAssistantMessageParam { + /** + * Must be "assistant" to identify this as the model's response + */ + role: 'assistant'; + + /** + * The content of the model's response + */ + content?: + | string + | Array< + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * (Optional) The name of the assistant message participant. + */ + name?: string; + + /** + * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object. 
+ */ + tool_calls?: Array; + } + + export namespace OpenAIAssistantMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + + export interface ToolCall { + type: 'function'; + + id?: string; + + function?: ToolCall.Function; + + index?: number; + } + + export namespace ToolCall { + export interface Function { + arguments?: string; + + name?: string; + } + } + } + + /** + * A message representing the result of a tool invocation in an OpenAI-compatible + * chat completion request. + */ + export interface OpenAIToolMessageParam { + /** + * The response content from the tool + */ + content: + | string + | Array< + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "tool" to identify this as a tool response + */ + role: 'tool'; + + /** + * Unique identifier for the tool call this response is for + */ + tool_call_id: string; + } + + export namespace OpenAIToolMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message from the developer in an OpenAI-compatible chat completion request. 
+ */ + export interface OpenAIDeveloperMessageParam { + /** + * The content of the developer message + */ + content: + | string + | Array< + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "developer" to identify this as a developer message + */ + role: 'developer'; + + /** + * (Optional) The name of the developer message participant. + */ + name?: string; + } + + export namespace OpenAIDeveloperMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + } +} + +export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming; + +export interface CompletionCreateParamsBase { + /** + * List of messages in the conversation. + */ + messages: Array< + | CompletionCreateParams.OpenAIUserMessageParam + | CompletionCreateParams.OpenAISystemMessageParam + | CompletionCreateParams.OpenAIAssistantMessageParam + | CompletionCreateParams.OpenAIToolMessageParam + | CompletionCreateParams.OpenAIDeveloperMessageParam + >; + + /** + * The identifier of the model to use. The model must be registered with Llama + * Stack and available via the /models endpoint. + */ + model: string; + + /** + * (Optional) The penalty for repeated tokens. + */ + frequency_penalty?: number; + + /** + * (Optional) The function call to use. + */ + function_call?: string | { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) List of functions to use. 
+ */ + functions?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + /** + * (Optional) The logit bias to use. + */ + logit_bias?: { [key: string]: number }; + + /** + * (Optional) The log probabilities to use. + */ + logprobs?: boolean; + + /** + * (Optional) The maximum number of tokens to generate. + */ + max_completion_tokens?: number; + + /** + * (Optional) The maximum number of tokens to generate. + */ + max_tokens?: number; + + /** + * (Optional) The number of completions to generate. + */ + n?: number; + + /** + * (Optional) Whether to parallelize tool calls. + */ + parallel_tool_calls?: boolean; + + /** + * (Optional) The penalty for repeated tokens. + */ + presence_penalty?: number; + + /** + * (Optional) The response format to use. + */ + response_format?: + | CompletionCreateParams.OpenAIResponseFormatText + | CompletionCreateParams.OpenAIResponseFormatJsonSchema + | CompletionCreateParams.OpenAIResponseFormatJsonObject; + + /** + * (Optional) The seed to use. + */ + seed?: number; + + /** + * (Optional) The stop tokens to use. + */ + stop?: string | Array; + + /** + * (Optional) Whether to stream the response. + */ + stream?: boolean; + + /** + * (Optional) The stream options to use. + */ + stream_options?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) The temperature to use. + */ + temperature?: number; + + /** + * (Optional) The tool choice to use. + */ + tool_choice?: string | { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) The tools to use. + */ + tools?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + /** + * (Optional) The top log probabilities to use. + */ + top_logprobs?: number; + + /** + * (Optional) The top p to use. + */ + top_p?: number; + + /** + * (Optional) The user to use. 
+ */ + user?: string; +} + +export namespace CompletionCreateParams { + /** + * A message from the user in an OpenAI-compatible chat completion request. + */ + export interface OpenAIUserMessageParam { + /** + * The content of the message, which can include text and other media + */ + content: + | string + | Array< + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "user" to identify this as a user message + */ + role: 'user'; + + /** + * (Optional) The name of the user message participant. + */ + name?: string; + } + + export namespace OpenAIUserMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A system message providing instructions or context to the model. + */ + export interface OpenAISystemMessageParam { + /** + * The content of the "system prompt". If multiple system messages are provided, + * they are concatenated. The underlying Llama Stack code may also add other system + * messages (for example, for formatting tool definitions). + */ + content: + | string + | Array< + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAISystemMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "system" to identify this as a system message + */ + role: 'system'; + + /** + * (Optional) The name of the system message participant. 
+ */ + name?: string; + } + + export namespace OpenAISystemMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message containing the model's (assistant) response in an OpenAI-compatible + * chat completion request. + */ + export interface OpenAIAssistantMessageParam { + /** + * Must be "assistant" to identify this as the model's response + */ + role: 'assistant'; + + /** + * The content of the model's response + */ + content?: + | string + | Array< + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIAssistantMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * (Optional) The name of the assistant message participant. + */ + name?: string; + + /** + * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object. 
+ */ + tool_calls?: Array<OpenAIAssistantMessageParam.ToolCall>; + } + + export namespace OpenAIAssistantMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + + export interface ToolCall { + type: 'function'; + + id?: string; + + function?: ToolCall.Function; + + index?: number; + } + + export namespace ToolCall { + export interface Function { + arguments?: string; + + name?: string; + } + } + } + + /** + * A message representing the result of a tool invocation in an OpenAI-compatible + * chat completion request. + */ + export interface OpenAIToolMessageParam { + /** + * The response content from the tool + */ + content: + | string + | Array< + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIToolMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "tool" to identify this as a tool response + */ + role: 'tool'; + + /** + * Unique identifier for the tool call this response is for + */ + tool_call_id: string; + } + + export namespace OpenAIToolMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + /** + * A message from the developer in an OpenAI-compatible chat completion request. 
+ */ + export interface OpenAIDeveloperMessageParam { + /** + * The content of the developer message + */ + content: + | string + | Array< + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIDeveloperMessageParam.OpenAIChatCompletionContentPartImageParam + >; + + /** + * Must be "developer" to identify this as a developer message + */ + role: 'developer'; + + /** + * (Optional) The name of the developer message participant. + */ + name?: string; + } + + export namespace OpenAIDeveloperMessageParam { + export interface OpenAIChatCompletionContentPartTextParam { + text: string; + + type: 'text'; + } + + export interface OpenAIChatCompletionContentPartImageParam { + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + export interface ImageURL { + url: string; + + detail?: string; + } + } + } + + export interface OpenAIResponseFormatText { + type: 'text'; + } + + export interface OpenAIResponseFormatJsonSchema { + json_schema: OpenAIResponseFormatJsonSchema.JsonSchema; + + type: 'json_schema'; + } + + export namespace OpenAIResponseFormatJsonSchema { + export interface JsonSchema { + name: string; + + description?: string; + + schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + + strict?: boolean; + } + } + + export interface OpenAIResponseFormatJsonObject { + type: 'json_object'; + } + + export type CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming; + export type CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming; +} + +export interface CompletionCreateParamsNonStreaming extends CompletionCreateParamsBase { + /** + * (Optional) Whether to stream the response. + */ + stream?: false; +} + +export interface CompletionCreateParamsStreaming extends CompletionCreateParamsBase { + /** + * (Optional) Whether to stream the response. 
+ */ + stream: true; +} + +export interface CompletionListParams { + /** + * The ID of the last chat completion to return. + */ + after?: string; + + /** + * The maximum number of chat completions to return. + */ + limit?: number; + + /** + * The model to filter by. + */ + model?: string; + + /** + * The order to sort the chat completions by: "asc" or "desc". Defaults to "desc". + */ + order?: 'asc' | 'desc'; +} + +export declare namespace Completions { + export { + type CompletionCreateResponse as CompletionCreateResponse, + type CompletionRetrieveResponse as CompletionRetrieveResponse, + type CompletionListResponse as CompletionListResponse, + type CompletionCreateParams as CompletionCreateParams, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + type CompletionListParams as CompletionListParams, + }; +} diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts new file mode 100644 index 0000000..31c9aba --- /dev/null +++ b/src/resources/chat/index.ts @@ -0,0 +1,13 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { Chat, type ChatCompletionChunk } from './chat'; +export { + Completions, + type CompletionCreateResponse, + type CompletionRetrieveResponse, + type CompletionListResponse, + type CompletionCreateParams, + type CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming, + type CompletionListParams, +} from './completions'; diff --git a/src/resources/completions.ts b/src/resources/completions.ts new file mode 100644 index 0000000..0ade7ab --- /dev/null +++ b/src/resources/completions.ts @@ -0,0 +1,273 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../resource'; +import { APIPromise } from '../core'; +import * as Core from '../core'; +import * as CompletionsAPI from './completions'; +import { Stream } from '../streaming'; + +export class Completions extends APIResource { + /** + * Generate an OpenAI-compatible completion for the given prompt using the + * specified model. + */ + create( + body: CompletionCreateParamsNonStreaming, + options?: Core.RequestOptions, + ): APIPromise<CompletionCreateResponse>; + create( + body: CompletionCreateParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise<Stream<CompletionCreateResponse>>; + create( + body: CompletionCreateParamsBase, + options?: Core.RequestOptions, + ): APIPromise<Stream<CompletionCreateResponse> | CompletionCreateResponse>; + create( + body: CompletionCreateParams, + options?: Core.RequestOptions, + ): APIPromise<CompletionCreateResponse> | APIPromise<Stream<CompletionCreateResponse>> { + return this._client.post('/v1/openai/v1/completions', { + body, + ...options, + stream: body.stream ?? false, + }) as APIPromise<CompletionCreateResponse> | APIPromise<Stream<CompletionCreateResponse>>; + } +} + +/** + * Response from an OpenAI-compatible completion request. + */ +export interface CompletionCreateResponse { + id: string; + + choices: Array<CompletionCreateResponse.Choice>; + + created: number; + + model: string; + + object: 'text_completion'; +} + +export namespace CompletionCreateResponse { + /** + * A choice from an OpenAI-compatible completion response. + */ + export interface Choice { + finish_reason: string; + + index: number; + + text: string; + + /** + * The log probabilities for the tokens in the message from an OpenAI-compatible + * chat completion response. + */ + logprobs?: Choice.Logprobs; + } + + export namespace Choice { + /** + * The log probabilities for the tokens in the message from an OpenAI-compatible + * chat completion response. 
+ */ + export interface Logprobs { + /** + * (Optional) The log probabilities for the tokens in the message + */ + content?: Array<Logprobs.Content>; + + /** + * (Optional) The log probabilities for the tokens in the message + */ + refusal?: Array<Logprobs.Refusal>; + } + + export namespace Logprobs { + /** + * The log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface Content { + token: string; + + logprob: number; + + top_logprobs: Array<Content.TopLogprob>; + + bytes?: Array<number>; + } + + export namespace Content { + /** + * The top log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface TopLogprob { + token: string; + + logprob: number; + + bytes?: Array<number>; + } + } + + /** + * The log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface Refusal { + token: string; + + logprob: number; + + top_logprobs: Array<Refusal.TopLogprob>; + + bytes?: Array<number>; + } + + export namespace Refusal { + /** + * The top log probability for a token from an OpenAI-compatible chat completion + * response. + */ + export interface TopLogprob { + token: string; + + logprob: number; + + bytes?: Array<number>; + } + } + } + } +} + +export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming; + +export interface CompletionCreateParamsBase { + /** + * The identifier of the model to use. The model must be registered with Llama + * Stack and available via the /models endpoint. + */ + model: string; + + /** + * The prompt to generate a completion for. + */ + prompt: string | Array<string> | Array<number> | Array<Array<number>>; + + /** + * (Optional) The number of completions to generate. + */ + best_of?: number; + + /** + * (Optional) Whether to echo the prompt. + */ + echo?: boolean; + + /** + * (Optional) The penalty for repeated tokens. + */ + frequency_penalty?: number; + + guided_choice?: Array<string>; + + /** + * (Optional) The logit bias to use. 
+ */ + logit_bias?: { [key: string]: number }; + + /** + * (Optional) The log probabilities to use. + */ + logprobs?: boolean; + + /** + * (Optional) The maximum number of tokens to generate. + */ + max_tokens?: number; + + /** + * (Optional) The number of completions to generate. + */ + n?: number; + + /** + * (Optional) The penalty for repeated tokens. + */ + presence_penalty?: number; + + prompt_logprobs?: number; + + /** + * (Optional) The seed to use. + */ + seed?: number; + + /** + * (Optional) The stop tokens to use. + */ + stop?: string | Array<string>; + + /** + * (Optional) Whether to stream the response. + */ + stream?: boolean; + + /** + * (Optional) The stream options to use. + */ + stream_options?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null }; + + /** + * (Optional) The suffix that should be appended to the completion. + */ + suffix?: string; + + /** + * (Optional) The temperature to use. + */ + temperature?: number; + + /** + * (Optional) The top p to use. + */ + top_p?: number; + + /** + * (Optional) The user to use. + */ + user?: string; +} + +export namespace CompletionCreateParams { + export type CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming; + export type CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming; +} + +export interface CompletionCreateParamsNonStreaming extends CompletionCreateParamsBase { + /** + * (Optional) Whether to stream the response. + */ + stream?: false; +} + +export interface CompletionCreateParamsStreaming extends CompletionCreateParamsBase { + /** + * (Optional) Whether to stream the response. 
+ */ + stream: true; +} + +export declare namespace Completions { + export { + type CompletionCreateResponse as CompletionCreateResponse, + type CompletionCreateParams as CompletionCreateParams, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; +} diff --git a/src/resources/datasetio.ts b/src/resources/datasetio.ts deleted file mode 100644 index 9b0581d..0000000 --- a/src/resources/datasetio.ts +++ /dev/null @@ -1,75 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../core/resource'; -import { APIPromise } from '../core/api-promise'; -import { buildHeaders } from '../internal/headers'; -import { RequestOptions } from '../internal/request-options'; -import { path } from '../internal/utils/path'; - -export class Datasetio extends APIResource { - appendRows(datasetID: string, body: DatasetioAppendRowsParams, options?: RequestOptions): APIPromise { - return this._client.post(path`/v1/datasetio/append-rows/${datasetID}`, { - body, - ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), - }); - } - - /** - * Get a paginated list of rows from a dataset. Uses offset-based pagination where: - * - * - start_index: The starting index (0-based). If None, starts from beginning. - * - limit: Number of items to return. If None or -1, returns all items. - * - * The response includes: - * - * - data: List of items for the current page - * - has_more: Whether there are more items available after this set - */ - iterateRows( - datasetID: string, - query: DatasetioIterateRowsParams | null | undefined = {}, - options?: RequestOptions, - ): APIPromise { - return this._client.get(path`/v1/datasetio/iterrows/${datasetID}`, { query, ...options }); - } -} - -/** - * A generic paginated response that follows a simple format. 
- */ -export interface DatasetioIterateRowsResponse { - /** - * The list of items for the current page - */ - data: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - /** - * Whether there are more items available after this set - */ - has_more: boolean; -} - -export interface DatasetioAppendRowsParams { - rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; -} - -export interface DatasetioIterateRowsParams { - /** - * The number of rows to get. - */ - limit?: number; - - /** - * Index into dataset for the first row to get. Get all rows if None. - */ - start_index?: number; -} - -export declare namespace Datasetio { - export { - type DatasetioIterateRowsResponse as DatasetioIterateRowsResponse, - type DatasetioAppendRowsParams as DatasetioAppendRowsParams, - type DatasetioIterateRowsParams as DatasetioIterateRowsParams, - }; -} diff --git a/src/resources/datasets.ts b/src/resources/datasets.ts index 76be739..c875e35 100644 --- a/src/resources/datasets.ts +++ b/src/resources/datasets.ts @@ -1,41 +1,117 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../core/resource'; -import { APIPromise } from '../core/api-promise'; -import { buildHeaders } from '../internal/headers'; -import { RequestOptions } from '../internal/request-options'; -import { path } from '../internal/utils/path'; +import { APIResource } from '../resource'; +import { isRequestOptions } from '../core'; +import * as Core from '../core'; export class Datasets extends APIResource { /** - * Register a new dataset. + * Get a dataset by its ID. 
*/ - create(body: DatasetCreateParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/datasets', { body, ...options }); + retrieve(datasetId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/datasets/${datasetId}`, options); } - retrieve(datasetID: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/datasets/${datasetID}`, options); + /** + * List all datasets. + */ + list(options?: Core.RequestOptions): Core.APIPromise { + return ( + this._client.get('/v1/datasets', options) as Core.APIPromise<{ data: DatasetListResponse }> + )._thenUnwrap((obj) => obj.data); } - list(options?: RequestOptions): APIPromise { - return this._client.get('/v1/datasets', options); + /** + * Append rows to a dataset. + */ + appendrows( + datasetId: string, + body: DatasetAppendrowsParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/datasetio/append-rows/${datasetId}`, { + body, + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); } - delete(datasetID: string, options?: RequestOptions): APIPromise { - return this._client.delete(path`/v1/datasets/${datasetID}`, { + /** + * Get a paginated list of rows from a dataset. Uses offset-based pagination where: + * + * - start_index: The starting index (0-based). If None, starts from beginning. + * - limit: Number of items to return. If None or -1, returns all items. + * + * The response includes: + * + * - data: List of items for the current page. + * - has_more: Whether there are more items available after this set. 
+ */ + iterrows( + datasetId: string, + query?: DatasetIterrowsParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + iterrows(datasetId: string, options?: Core.RequestOptions): Core.APIPromise; + iterrows( + datasetId: string, + query: DatasetIterrowsParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.iterrows(datasetId, {}, query); + } + return this._client.get(`/v1/datasetio/iterrows/${datasetId}`, { query, ...options }); + } + + /** + * Register a new dataset. + */ + register( + body: DatasetRegisterParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post('/v1/datasets', { body, ...options }); + } + + /** + * Unregister a dataset by its ID. + */ + unregister(datasetId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1/datasets/${datasetId}`, { ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), + headers: { Accept: '*/*', ...options?.headers }, }); } } -/** - * A dataset that can be obtained from a URI. - */ -export type DataSource = DataSource.UriDataSource | DataSource.RowsDataSource; +export interface ListDatasetsResponse { + data: DatasetListResponse; +} + +export interface DatasetRetrieveResponse { + identifier: string; + + metadata: { [key: string]: boolean | number | string | Array | unknown | null }; + + provider_id: string; + + /** + * Purpose of the dataset. Each purpose has a required input data schema. + */ + purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer'; -export namespace DataSource { + /** + * A dataset that can be obtained from a URI. + */ + source: DatasetRetrieveResponse.UriDataSource | DatasetRetrieveResponse.RowsDataSource; + + type: 'dataset'; + + provider_resource_id?: string; +} + +export namespace DatasetRetrieveResponse { /** * A dataset that can be obtained from a URI. 
*/ @@ -65,7 +141,83 @@ export namespace DataSource { } } -export interface Dataset { +export type DatasetListResponse = Array; + +export namespace DatasetListResponse { + export interface DatasetListResponseItem { + identifier: string; + + metadata: { [key: string]: boolean | number | string | Array | unknown | null }; + + provider_id: string; + + /** + * Purpose of the dataset. Each purpose has a required input data schema. + */ + purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer'; + + /** + * A dataset that can be obtained from a URI. + */ + source: DatasetListResponseItem.UriDataSource | DatasetListResponseItem.RowsDataSource; + + type: 'dataset'; + + provider_resource_id?: string; + } + + export namespace DatasetListResponseItem { + /** + * A dataset that can be obtained from a URI. + */ + export interface UriDataSource { + type: 'uri'; + + /** + * The dataset can be obtained from a URI. E.g. - + * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - + * "data:csv;base64,{base64_content}" + */ + uri: string; + } + + /** + * A dataset stored in rows. + */ + export interface RowsDataSource { + /** + * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user", + * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} + * ] + */ + rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + type: 'rows'; + } + } +} + +/** + * A generic paginated response that follows a simple format. 
+ */ +export interface DatasetIterrowsResponse { + /** + * The list of items for the current page + */ + data: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + /** + * Whether there are more items available after this set + */ + has_more: boolean; + + /** + * The URL for accessing this list + */ + url?: string; +} + +export interface DatasetRegisterResponse { identifier: string; metadata: { [key: string]: boolean | number | string | Array | unknown | null }; @@ -80,20 +232,65 @@ export interface Dataset { /** * A dataset that can be obtained from a URI. */ - source: DataSource; + source: DatasetRegisterResponse.UriDataSource | DatasetRegisterResponse.RowsDataSource; type: 'dataset'; provider_resource_id?: string; } -export interface DatasetListResponse { - data: Array; +export namespace DatasetRegisterResponse { + /** + * A dataset that can be obtained from a URI. + */ + export interface UriDataSource { + type: 'uri'; + + /** + * The dataset can be obtained from a URI. E.g. - + * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - + * "data:csv;base64,{base64_content}" + */ + uri: string; + } + + /** + * A dataset stored in rows. + */ + export interface RowsDataSource { + /** + * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user", + * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} + * ] + */ + rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + type: 'rows'; + } +} + +export interface DatasetAppendrowsParams { + /** + * The rows to append to the dataset. + */ + rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; +} + +export interface DatasetIterrowsParams { + /** + * The number of rows to get. + */ + limit?: number; + + /** + * Index into dataset for the first row to get. Get all rows if None. 
+ */ + start_index?: number; } -export interface DatasetCreateParams { +export interface DatasetRegisterParams { /** - * The purpose of the dataset. One of - "post-training/messages": The dataset + * The purpose of the dataset. One of: - "post-training/messages": The dataset * contains a messages column with list of messages for post-training. { * "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": * "assistant", "content": "Hello, world!"}, ] } - "eval/question-answer": The @@ -117,7 +314,7 @@ export interface DatasetCreateParams { * { "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": * "assistant", "content": "Hello, world!"}, ] } ] } */ - source: DataSource; + source: DatasetRegisterParams.UriDataSource | DatasetRegisterParams.RowsDataSource; /** * The ID of the dataset. If not provided, an ID will be generated. @@ -125,16 +322,50 @@ export interface DatasetCreateParams { dataset_id?: string; /** - * The metadata for the dataset. - E.g. {"description": "My dataset"} + * The metadata for the dataset. - E.g. {"description": "My dataset"}. */ metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; } +export namespace DatasetRegisterParams { + /** + * A dataset that can be obtained from a URI. + */ + export interface UriDataSource { + type: 'uri'; + + /** + * The dataset can be obtained from a URI. E.g. - + * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - + * "data:csv;base64,{base64_content}" + */ + uri: string; + } + + /** + * A dataset stored in rows. + */ + export interface RowsDataSource { + /** + * The dataset is stored in rows. E.g. 
- [ {"messages": [{"role": "user", + * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} + * ] + */ + rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + type: 'rows'; + } +} + export declare namespace Datasets { export { - type DataSource as DataSource, - type Dataset as Dataset, + type ListDatasetsResponse as ListDatasetsResponse, + type DatasetRetrieveResponse as DatasetRetrieveResponse, type DatasetListResponse as DatasetListResponse, - type DatasetCreateParams as DatasetCreateParams, + type DatasetIterrowsResponse as DatasetIterrowsResponse, + type DatasetRegisterResponse as DatasetRegisterResponse, + type DatasetAppendrowsParams as DatasetAppendrowsParams, + type DatasetIterrowsParams as DatasetIterrowsParams, + type DatasetRegisterParams as DatasetRegisterParams, }; } diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts new file mode 100644 index 0000000..89758af --- /dev/null +++ b/src/resources/embeddings.ts @@ -0,0 +1,119 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../resource'; +import * as Core from '../core'; + +export class Embeddings extends APIResource { + /** + * Generate OpenAI-compatible embeddings for the given input using the specified + * model. + */ + create( + body: EmbeddingCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post('/v1/openai/v1/embeddings', { body, ...options }); + } +} + +/** + * Response from an OpenAI-compatible embeddings request. 
+ */ +export interface CreateEmbeddingsResponse { + /** + * List of embedding data objects + */ + data: Array; + + /** + * The model that was used to generate the embeddings + */ + model: string; + + /** + * The object type, which will be "list" + */ + object: 'list'; + + /** + * Usage information + */ + usage: CreateEmbeddingsResponse.Usage; +} + +export namespace CreateEmbeddingsResponse { + /** + * A single embedding data object from an OpenAI-compatible embeddings response. + */ + export interface Data { + /** + * The embedding vector as a list of floats (when encoding_format="float") or as a + * base64-encoded string (when encoding_format="base64") + */ + embedding: Array | string; + + /** + * The index of the embedding in the input list + */ + index: number; + + /** + * The object type, which will be "embedding" + */ + object: 'embedding'; + } + + /** + * Usage information + */ + export interface Usage { + /** + * The number of tokens in the input + */ + prompt_tokens: number; + + /** + * The total number of tokens used + */ + total_tokens: number; + } +} + +export interface EmbeddingCreateParams { + /** + * Input text to embed, encoded as a string or array of strings. To embed multiple + * inputs in a single request, pass an array of strings. + */ + input: string | Array; + + /** + * The identifier of the model to use. The model must be an embedding model + * registered with Llama Stack and available via the /models endpoint. + */ + model: string; + + /** + * (Optional) The number of dimensions the resulting output embeddings should have. + * Only supported in text-embedding-3 and later models. + */ + dimensions?: number; + + /** + * (Optional) The format to return the embeddings in. Can be either "float" or + * "base64". Defaults to "float". + */ + encoding_format?: string; + + /** + * (Optional) A unique identifier representing your end-user, which can help OpenAI + * to monitor and detect abuse. 
+ */ + user?: string; +} + +export declare namespace Embeddings { + export { + type CreateEmbeddingsResponse as CreateEmbeddingsResponse, + type EmbeddingCreateParams as EmbeddingCreateParams, + }; +} diff --git a/src/resources/eval/benchmarks/benchmarks.ts b/src/resources/eval/benchmarks/benchmarks.ts deleted file mode 100644 index 713f759..0000000 --- a/src/resources/eval/benchmarks/benchmarks.ts +++ /dev/null @@ -1,206 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../../../core/resource'; -import * as InferenceAPI from '../../inference'; -import * as ScoringFunctionsAPI from '../../scoring-functions'; -import * as AgentsAPI from '../../agents/agents'; -import * as JobsAPI from './jobs'; -import { Job, JobCancelParams, JobResultParams, JobRetrieveParams, JobRunParams, Jobs } from './jobs'; -import { APIPromise } from '../../../core/api-promise'; -import { buildHeaders } from '../../../internal/headers'; -import { RequestOptions } from '../../../internal/request-options'; -import { path } from '../../../internal/utils/path'; - -export class Benchmarks extends APIResource { - jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); - - create(body: BenchmarkCreateParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/eval/benchmarks', { - body, - ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), - }); - } - - retrieve(benchmarkID: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/eval/benchmarks/${benchmarkID}`, options); - } - - list(options?: RequestOptions): APIPromise { - return this._client.get('/v1/eval/benchmarks', options); - } - - /** - * Evaluate a list of rows on a benchmark. 
- */ - evaluate( - benchmarkID: string, - body: BenchmarkEvaluateParams, - options?: RequestOptions, - ): APIPromise { - return this._client.post(path`/v1/eval/benchmarks/${benchmarkID}/evaluations`, { body, ...options }); - } -} - -export interface Benchmark { - dataset_id: string; - - identifier: string; - - metadata: { [key: string]: boolean | number | string | Array | unknown | null }; - - provider_id: string; - - scoring_functions: Array; - - type: 'benchmark'; - - provider_resource_id?: string; -} - -/** - * A benchmark configuration for evaluation. - */ -export interface BenchmarkConfig { - /** - * The candidate to evaluate. - */ - eval_candidate: BenchmarkConfig.ModelCandidate | BenchmarkConfig.AgentCandidate; - - /** - * Map between scoring function id and parameters for each scoring function you - * want to run - */ - scoring_params: { [key: string]: ScoringFunctionsAPI.ScoringFnParams }; - - /** - * (Optional) The number of examples to evaluate. If not provided, all examples in - * the dataset will be evaluated - */ - num_examples?: number; -} - -export namespace BenchmarkConfig { - /** - * A model candidate for evaluation. - */ - export interface ModelCandidate { - /** - * The model ID to evaluate. - */ - model: string; - - /** - * The sampling parameters for the model. - */ - sampling_params: InferenceAPI.SamplingParams; - - type: 'model'; - - /** - * (Optional) The system message providing instructions or context to the model. - */ - system_message?: InferenceAPI.SystemMessage; - } - - /** - * An agent candidate for evaluation. - */ - export interface AgentCandidate { - /** - * The configuration for the agent candidate. - */ - config: AgentsAPI.AgentConfig; - - type: 'agent'; - } -} - -/** - * The response from an evaluation. - */ -export interface EvaluateResponse { - /** - * The generations from the evaluation. 
- */ - generations: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - /** - * The scores from the evaluation. - */ - scores: { [key: string]: EvaluateResponse.Scores }; -} - -export namespace EvaluateResponse { - /** - * A scoring result for a single row. - */ - export interface Scores { - /** - * Map of metric name to aggregated value - */ - aggregated_results: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * The scoring result for each row. Each row is a map of column name to value. - */ - score_rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - } -} - -export interface BenchmarkListResponse { - data: Array; -} - -export interface BenchmarkCreateParams { - benchmark_id: string; - - dataset_id: string; - - scoring_functions: Array; - - metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; - - provider_benchmark_id?: string; - - provider_id?: string; -} - -export interface BenchmarkEvaluateParams { - /** - * The configuration for the benchmark. - */ - benchmark_config: BenchmarkConfig; - - /** - * The rows to evaluate. - */ - input_rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - /** - * The scoring functions to use for the evaluation. 
- */ - scoring_functions: Array; -} - -Benchmarks.Jobs = Jobs; - -export declare namespace Benchmarks { - export { - type Benchmark as Benchmark, - type BenchmarkConfig as BenchmarkConfig, - type EvaluateResponse as EvaluateResponse, - type BenchmarkListResponse as BenchmarkListResponse, - type BenchmarkCreateParams as BenchmarkCreateParams, - type BenchmarkEvaluateParams as BenchmarkEvaluateParams, - }; - - export { - Jobs as Jobs, - type Job as Job, - type JobRetrieveParams as JobRetrieveParams, - type JobCancelParams as JobCancelParams, - type JobResultParams as JobResultParams, - type JobRunParams as JobRunParams, - }; -} diff --git a/src/resources/eval/benchmarks/index.ts b/src/resources/eval/benchmarks/index.ts deleted file mode 100644 index 75c5e3b..0000000 --- a/src/resources/eval/benchmarks/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export { - Benchmarks, - type Benchmark, - type BenchmarkConfig, - type EvaluateResponse, - type BenchmarkListResponse, - type BenchmarkCreateParams, - type BenchmarkEvaluateParams, -} from './benchmarks'; -export { - Jobs, - type Job, - type JobRetrieveParams, - type JobCancelParams, - type JobResultParams, - type JobRunParams, -} from './jobs'; diff --git a/src/resources/eval/benchmarks/jobs.ts b/src/resources/eval/benchmarks/jobs.ts deleted file mode 100644 index 1ef10e2..0000000 --- a/src/resources/eval/benchmarks/jobs.ts +++ /dev/null @@ -1,92 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { APIResource } from '../../../core/resource'; -import * as BenchmarksAPI from './benchmarks'; -import { APIPromise } from '../../../core/api-promise'; -import { buildHeaders } from '../../../internal/headers'; -import { RequestOptions } from '../../../internal/request-options'; -import { path } from '../../../internal/utils/path'; - -export class Jobs extends APIResource { - /** - * Get the status of a job. - */ - retrieve(jobID: string, params: JobRetrieveParams, options?: RequestOptions): APIPromise { - const { benchmark_id } = params; - return this._client.get(path`/v1/eval/benchmarks/${benchmark_id}/jobs/${jobID}`, options); - } - - /** - * Cancel a job. - */ - cancel(jobID: string, params: JobCancelParams, options?: RequestOptions): APIPromise { - const { benchmark_id } = params; - return this._client.delete(path`/v1/eval/benchmarks/${benchmark_id}/jobs/${jobID}`, { - ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), - }); - } - - /** - * Get the result of a job. - */ - result( - jobID: string, - params: JobResultParams, - options?: RequestOptions, - ): APIPromise { - const { benchmark_id } = params; - return this._client.get(path`/v1/eval/benchmarks/${benchmark_id}/jobs/${jobID}/result`, options); - } - - /** - * Run an evaluation on a benchmark. - */ - run(benchmarkID: string, body: JobRunParams, options?: RequestOptions): APIPromise { - return this._client.post(path`/v1/eval/benchmarks/${benchmarkID}/jobs`, { body, ...options }); - } -} - -export interface Job { - job_id: string; - - status: 'completed' | 'in_progress' | 'failed' | 'scheduled' | 'cancelled'; -} - -export interface JobRetrieveParams { - /** - * The ID of the benchmark to run the evaluation on. - */ - benchmark_id: string; -} - -export interface JobCancelParams { - /** - * The ID of the benchmark to run the evaluation on. - */ - benchmark_id: string; -} - -export interface JobResultParams { - /** - * The ID of the benchmark to run the evaluation on. 
- */ - benchmark_id: string; -} - -export interface JobRunParams { - /** - * The configuration for the benchmark. - */ - benchmark_config: BenchmarksAPI.BenchmarkConfig; -} - -export declare namespace Jobs { - export { - type Job as Job, - type JobRetrieveParams as JobRetrieveParams, - type JobCancelParams as JobCancelParams, - type JobResultParams as JobResultParams, - type JobRunParams as JobRunParams, - }; -} diff --git a/src/resources/eval/eval.ts b/src/resources/eval/eval.ts index 4237129..a2f277e 100644 --- a/src/resources/eval/eval.ts +++ b/src/resources/eval/eval.ts @@ -1,31 +1,201 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../core/resource'; -import * as BenchmarksAPI from './benchmarks/benchmarks'; -import { - Benchmark, - BenchmarkConfig, - BenchmarkCreateParams, - BenchmarkEvaluateParams, - BenchmarkListResponse, - Benchmarks, - EvaluateResponse, -} from './benchmarks/benchmarks'; +import { APIResource } from '../../resource'; +import * as Core from '../../core'; +import * as ScoringFunctionsAPI from '../scoring-functions'; +import * as Shared from '../shared'; +import * as JobsAPI from './jobs'; +import { Jobs } from './jobs'; export class Eval extends APIResource { - benchmarks: BenchmarksAPI.Benchmarks = new BenchmarksAPI.Benchmarks(this._client); + jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); + + /** + * Evaluate a list of rows on a benchmark. + */ + evaluateRows( + benchmarkId: string, + body: EvalEvaluateRowsParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options }); + } + + /** + * Evaluate a list of rows on a benchmark. 
+ */ + evaluateRowsAlpha( + benchmarkId: string, + body: EvalEvaluateRowsAlphaParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options }); + } + + /** + * Run an evaluation on a benchmark. + */ + runEval(benchmarkId: string, body: EvalRunEvalParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options }); + } + + /** + * Run an evaluation on a benchmark. + */ + runEvalAlpha( + benchmarkId: string, + body: EvalRunEvalAlphaParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options }); + } +} + +/** + * A benchmark configuration for evaluation. + */ +export interface BenchmarkConfig { + /** + * The candidate to evaluate. + */ + eval_candidate: EvalCandidate; + + /** + * Map between scoring function id and parameters for each scoring function you + * want to run + */ + scoring_params: { [key: string]: ScoringFunctionsAPI.ScoringFnParams }; + + /** + * (Optional) The number of examples to evaluate. If not provided, all examples in + * the dataset will be evaluated + */ + num_examples?: number; +} + +/** + * A model candidate for evaluation. + */ +export type EvalCandidate = EvalCandidate.ModelCandidate | EvalCandidate.AgentCandidate; + +export namespace EvalCandidate { + /** + * A model candidate for evaluation. + */ + export interface ModelCandidate { + /** + * The model ID to evaluate. + */ + model: string; + + /** + * The sampling parameters for the model. + */ + sampling_params: Shared.SamplingParams; + + type: 'model'; + + /** + * (Optional) The system message providing instructions or context to the model. + */ + system_message?: Shared.SystemMessage; + } + + /** + * An agent candidate for evaluation. 
+ */ + export interface AgentCandidate { + /** + * The configuration for the agent candidate. + */ + config: Shared.AgentConfig; + + type: 'agent'; + } } -Eval.Benchmarks = Benchmarks; +/** + * The response from an evaluation. + */ +export interface EvaluateResponse { + /** + * The generations from the evaluation. + */ + generations: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + /** + * The scores from the evaluation. + */ + scores: { [key: string]: Shared.ScoringResult }; +} + +export interface Job { + job_id: string; + + status: 'completed' | 'in_progress' | 'failed' | 'scheduled' | 'cancelled'; +} + +export interface EvalEvaluateRowsParams { + /** + * The configuration for the benchmark. + */ + benchmark_config: BenchmarkConfig; + + /** + * The rows to evaluate. + */ + input_rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + /** + * The scoring functions to use for the evaluation. + */ + scoring_functions: Array; +} + +export interface EvalEvaluateRowsAlphaParams { + /** + * The configuration for the benchmark. + */ + benchmark_config: BenchmarkConfig; + + /** + * The rows to evaluate. + */ + input_rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + /** + * The scoring functions to use for the evaluation. + */ + scoring_functions: Array; +} + +export interface EvalRunEvalParams { + /** + * The configuration for the benchmark. + */ + benchmark_config: BenchmarkConfig; +} + +export interface EvalRunEvalAlphaParams { + /** + * The configuration for the benchmark. 
+ */ + benchmark_config: BenchmarkConfig; +} + +Eval.Jobs = Jobs; export declare namespace Eval { export { - Benchmarks as Benchmarks, - type Benchmark as Benchmark, type BenchmarkConfig as BenchmarkConfig, + type EvalCandidate as EvalCandidate, type EvaluateResponse as EvaluateResponse, - type BenchmarkListResponse as BenchmarkListResponse, - type BenchmarkCreateParams as BenchmarkCreateParams, - type BenchmarkEvaluateParams as BenchmarkEvaluateParams, + type Job as Job, + type EvalEvaluateRowsParams as EvalEvaluateRowsParams, + type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams, + type EvalRunEvalParams as EvalRunEvalParams, + type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams, }; + + export { Jobs as Jobs }; } diff --git a/src/resources/eval/index.ts b/src/resources/eval/index.ts index 859e0d1..e8c35f3 100644 --- a/src/resources/eval/index.ts +++ b/src/resources/eval/index.ts @@ -1,12 +1,14 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - Benchmarks, - type Benchmark, + Eval, type BenchmarkConfig, + type EvalCandidate, type EvaluateResponse, - type BenchmarkListResponse, - type BenchmarkCreateParams, - type BenchmarkEvaluateParams, -} from './benchmarks/index'; -export { Eval } from './eval'; + type Job, + type EvalEvaluateRowsParams, + type EvalEvaluateRowsAlphaParams, + type EvalRunEvalParams, + type EvalRunEvalAlphaParams, +} from './eval'; +export { Jobs } from './jobs'; diff --git a/src/resources/eval/jobs.ts b/src/resources/eval/jobs.ts new file mode 100644 index 0000000..13d4a4d --- /dev/null +++ b/src/resources/eval/jobs.ts @@ -0,0 +1,35 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as Core from '../../core'; +import * as EvalAPI from './eval'; + +export class Jobs extends APIResource { + /** + * Get the result of a job. 
+ */ + retrieve( + benchmarkId: string, + jobId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}/result`, options); + } + + /** + * Cancel a job. + */ + cancel(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, { + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); + } + + /** + * Get the status of a job. + */ + status(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, options); + } +} diff --git a/src/resources/files.ts b/src/resources/files.ts index 46a5299..3141c6c 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -1,3 +1,184 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export * from './files/index'; +import { APIResource } from '../resource'; +import { isRequestOptions } from '../core'; +import * as Core from '../core'; + +export class Files extends APIResource { + /** + * Upload a file that can be used across various endpoints. The file upload should + * be a multipart form request with: + * + * - file: The File object (not file name) to be uploaded. + * - purpose: The intended purpose of the uploaded file. + */ + create(body: FileCreateParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/v1/openai/v1/files', Core.multipartFormRequestOptions({ body, ...options })); + } + + /** + * Returns information about a specific file. + */ + retrieve(fileId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/openai/v1/files/${fileId}`, options); + } + + /** + * Returns a list of files that belong to the user's organization. 
+ */ + list(query?: FileListParams, options?: Core.RequestOptions): Core.APIPromise; + list(options?: Core.RequestOptions): Core.APIPromise; + list( + query: FileListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return this._client.get('/v1/openai/v1/files', { query, ...options }); + } + + /** + * Delete a file. + */ + delete(fileId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1/openai/v1/files/${fileId}`, options); + } + + /** + * Returns the contents of the specified file. + */ + content(fileId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/openai/v1/files/${fileId}/content`, options); + } +} + +/** + * Response for deleting a file in OpenAI Files API. + */ +export interface DeleteFileResponse { + /** + * The file identifier that was deleted + */ + id: string; + + /** + * Whether the file was successfully deleted + */ + deleted: boolean; + + /** + * The object type, which is always "file" + */ + object: 'file'; +} + +/** + * OpenAI File object as defined in the OpenAI Files API. + */ +export interface File { + /** + * The file identifier, which can be referenced in the API endpoints + */ + id: string; + + /** + * The size of the file, in bytes + */ + bytes: number; + + /** + * The Unix timestamp (in seconds) for when the file was created + */ + created_at: number; + + /** + * The Unix timestamp (in seconds) for when the file expires + */ + expires_at: number; + + /** + * The name of the file + */ + filename: string; + + /** + * The object type, which is always "file" + */ + object: 'file'; + + /** + * The intended purpose of the file + */ + purpose: 'assistants'; +} + +/** + * Response for listing files in OpenAI Files API. 
+ */ +export interface ListFilesResponse { + /** + * List of file objects + */ + data: Array; + + first_id: string; + + has_more: boolean; + + last_id: string; + + /** + * The object type, which is always "list" + */ + object: 'list'; +} + +export type FileContentResponse = unknown; + +export interface FileCreateParams { + file: Core.Uploadable; + + /** + * Valid purpose values for OpenAI Files API. + */ + purpose: 'assistants'; +} + +export interface FileListParams { + /** + * A cursor for use in pagination. `after` is an object ID that defines your place + * in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include after=obj_foo in order to + * fetch the next page of the list. + */ + after?: string; + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and + * 10,000, and the default is 10,000. + */ + limit?: number; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending + * order and `desc` for descending order. + */ + order?: 'asc' | 'desc'; + + /** + * Only return files with the given purpose. + */ + purpose?: 'assistants'; +} + +export declare namespace Files { + export { + type DeleteFileResponse as DeleteFileResponse, + type File as File, + type ListFilesResponse as ListFilesResponse, + type FileContentResponse as FileContentResponse, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; +} diff --git a/src/resources/files/files.ts b/src/resources/files/files.ts deleted file mode 100644 index 829d24b..0000000 --- a/src/resources/files/files.ts +++ /dev/null @@ -1,196 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { APIResource } from '../../core/resource'; -import * as SessionAPI from './session'; -import { Session, SessionUploadContentParams } from './session'; -import { APIPromise } from '../../core/api-promise'; -import { buildHeaders } from '../../internal/headers'; -import { RequestOptions } from '../../internal/request-options'; -import { path } from '../../internal/utils/path'; - -export class Files extends APIResource { - session: SessionAPI.Session = new SessionAPI.Session(this._client); - - /** - * Get a file info identified by a bucket and key. - */ - retrieve(key: string, params: FileRetrieveParams, options?: RequestOptions): APIPromise { - const { bucket } = params; - return this._client.get(path`/v1/files/${bucket}/${key}`, options); - } - - /** - * List all buckets. - */ - list(query: FileListParams, options?: RequestOptions): APIPromise { - return this._client.get('/v1/files', { query, ...options }); - } - - /** - * Delete a file identified by a bucket and key. - */ - delete(key: string, params: FileDeleteParams, options?: RequestOptions): APIPromise { - const { bucket } = params; - return this._client.delete(path`/v1/files/${bucket}/${key}`, { - ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), - }); - } - - /** - * Create a new upload session for a file identified by a bucket and key. - */ - createUploadSession(body: FileCreateUploadSessionParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/files', { body, ...options }); - } - - /** - * List all files in a bucket. - */ - listInBucket(bucket: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/files/${bucket}`, options); - } -} - -/** - * Response representing a file entry. 
- */ -export interface File { - /** - * Bucket under which the file is stored (valid chars: a-zA-Z0-9\_-) - */ - bucket: string; - - /** - * Size of the file in bytes - */ - bytes: number; - - /** - * Timestamp of when the file was created - */ - created_at: number; - - /** - * Key under which the file is stored (valid chars: a-zA-Z0-9\_-/.) - */ - key: string; - - /** - * MIME type of the file - */ - mime_type: string; - - /** - * Upload URL for the file contents - */ - url: string; -} - -/** - * Response after initiating a file upload session. - */ -export interface FileUpload { - /** - * ID of the upload session - */ - id: string; - - /** - * Upload content offset - */ - offset: number; - - /** - * Upload content size - */ - size: number; - - /** - * Upload URL for the file or file parts - */ - url: string; -} - -/** - * Response representing a list of file entries. - */ -export interface FileListResponse { - /** - * List of FileResponse entries - */ - data: Array; -} - -export namespace FileListResponse { - export interface Data { - name: string; - } -} - -/** - * Response representing a list of file entries. - */ -export interface FileListInBucketResponse { - /** - * List of FileResponse entries - */ - data: Array; -} - -export interface FileRetrieveParams { - /** - * Bucket name (valid chars: a-zA-Z0-9\_-) - */ - bucket: string; -} - -export interface FileListParams { - bucket: string; -} - -export interface FileDeleteParams { - /** - * Bucket name (valid chars: a-zA-Z0-9\_-) - */ - bucket: string; -} - -export interface FileCreateUploadSessionParams { - /** - * Bucket under which the file is stored (valid chars: a-zA-Z0-9\_-) - */ - bucket: string; - - /** - * Key under which the file is stored (valid chars: a-zA-Z0-9\_-/.) 
- */ - key: string; - - /** - * MIME type of the file - */ - mime_type: string; - - /** - * File size in bytes - */ - size: number; -} - -Files.Session = Session; - -export declare namespace Files { - export { - type File as File, - type FileUpload as FileUpload, - type FileListResponse as FileListResponse, - type FileListInBucketResponse as FileListInBucketResponse, - type FileRetrieveParams as FileRetrieveParams, - type FileListParams as FileListParams, - type FileDeleteParams as FileDeleteParams, - type FileCreateUploadSessionParams as FileCreateUploadSessionParams, - }; - - export { Session as Session, type SessionUploadContentParams as SessionUploadContentParams }; -} diff --git a/src/resources/files/index.ts b/src/resources/files/index.ts deleted file mode 100644 index bdd87ff..0000000 --- a/src/resources/files/index.ts +++ /dev/null @@ -1,14 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export { - Files, - type File, - type FileUpload, - type FileListResponse, - type FileListInBucketResponse, - type FileRetrieveParams, - type FileListParams, - type FileDeleteParams, - type FileCreateUploadSessionParams, -} from './files'; -export { Session, type SessionUploadContentParams } from './session'; diff --git a/src/resources/files/session.ts b/src/resources/files/session.ts deleted file mode 100644 index 4abe468..0000000 --- a/src/resources/files/session.ts +++ /dev/null @@ -1,42 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { APIResource } from '../../core/resource'; -import * as FilesAPI from './files'; -import { APIPromise } from '../../core/api-promise'; -import { buildHeaders } from '../../internal/headers'; -import { RequestOptions } from '../../internal/request-options'; -import { path } from '../../internal/utils/path'; - -export class Session extends APIResource { - /** - * Returns information about an existsing upload session - */ - retrieve(uploadID: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/files/session:${uploadID}`, options); - } - - /** - * Upload file content to an existing upload session. On the server, request body - * will have the raw bytes that are uploaded. - */ - uploadContent( - uploadID: string, - params: SessionUploadContentParams, - options?: RequestOptions, - ): APIPromise { - const { body } = params; - return this._client.post(path`/v1/files/session:${uploadID}`, { - body: body, - ...options, - headers: buildHeaders([{ 'Content-Type': 'application/octet-stream' }, options?.headers]), - }); - } -} - -export interface SessionUploadContentParams { - body: string | ArrayBuffer | ArrayBufferView | Blob | DataView; -} - -export declare namespace Session { - export { type SessionUploadContentParams as SessionUploadContentParams }; -} diff --git a/src/resources/health.ts b/src/resources/health.ts deleted file mode 100644 index 0dada08..0000000 --- a/src/resources/health.ts +++ /dev/null @@ -1,19 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { APIResource } from '../core/resource'; -import { APIPromise } from '../core/api-promise'; -import { RequestOptions } from '../internal/request-options'; - -export class Health extends APIResource { - check(options?: RequestOptions): APIPromise { - return this._client.get('/v1/health', options); - } -} - -export interface HealthCheckResponse { - status: 'OK' | 'Error' | 'Not Implemented'; -} - -export declare namespace Health { - export { type HealthCheckResponse as HealthCheckResponse }; -} diff --git a/src/resources/index.ts b/src/resources/index.ts index 835cefb..743bbd9 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -1,83 +1,112 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +export * from './shared'; export { Agents, - type Agent, - type AgentConfig, + type InferenceStep, + type MemoryRetrievalStep, + type ShieldCallStep, + type ToolExecutionStep, + type ToolResponse, type AgentCreateResponse, + type AgentRetrieveResponse, type AgentListResponse, - type AgentListSessionsResponse, type AgentCreateParams, + type AgentListParams, } from './agents/agents'; export { - Datasetio, - type DatasetioIterateRowsResponse, - type DatasetioAppendRowsParams, - type DatasetioIterateRowsParams, -} from './datasetio'; + Benchmarks, + type Benchmark, + type ListBenchmarksResponse, + type BenchmarkListResponse, + type BenchmarkRegisterParams, +} from './benchmarks'; +export { Chat, type ChatCompletionChunk } from './chat/chat'; +export { + Completions, + type CompletionCreateResponse, + type CompletionCreateParams, + type CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming, +} from './completions'; export { Datasets, - type DataSource, - type Dataset, + type ListDatasetsResponse, + type DatasetRetrieveResponse, type DatasetListResponse, - type DatasetCreateParams, + type DatasetIterrowsResponse, + type DatasetRegisterResponse, + type DatasetAppendrowsParams, + type 
DatasetIterrowsParams, + type DatasetRegisterParams, } from './datasets'; -export { Eval } from './eval/eval'; +export { Embeddings, type CreateEmbeddingsResponse, type EmbeddingCreateParams } from './embeddings'; +export { + Eval, + type BenchmarkConfig, + type EvalCandidate, + type EvaluateResponse, + type Job, + type EvalEvaluateRowsParams, + type EvalEvaluateRowsAlphaParams, + type EvalRunEvalParams, + type EvalRunEvalAlphaParams, +} from './eval/eval'; export { Files, + type DeleteFileResponse, type File, - type FileUpload, - type FileListResponse, - type FileListInBucketResponse, - type FileRetrieveParams, + type ListFilesResponse, + type FileContentResponse, + type FileCreateParams, type FileListParams, - type FileDeleteParams, - type FileCreateUploadSessionParams, -} from './files/files'; -export { Health, type HealthCheckResponse } from './health'; +} from './files'; export { Inference, - type ChatCompletionResponse, - type CompletionMessage, + type ChatCompletionResponseStreamChunk, type CompletionResponse, - type InterleavedContent, - type InterleavedContentItem, - type Message, - type MetricInResponse, - type ResponseFormat, - type SamplingParams, - type SystemMessage, + type EmbeddingsResponse, type TokenLogProbs, - type ToolCall, - type ToolConfig, - type ToolDefinition, type InferenceBatchChatCompletionResponse, - type InferenceBatchCompletionResponse, - type InferenceEmbeddingsResponse, type InferenceBatchChatCompletionParams, type InferenceBatchCompletionParams, type InferenceChatCompletionParams, + type InferenceChatCompletionParamsNonStreaming, + type InferenceChatCompletionParamsStreaming, type InferenceCompletionParams, + type InferenceCompletionParamsNonStreaming, + type InferenceCompletionParamsStreaming, type InferenceEmbeddingsParams, } from './inference'; -export { Inspect, type InspectListRoutesResponse } from './inspect'; -export { Models, type Model, type ModelType, type ModelListResponse, type ModelCreateParams } from './models'; 
-export { OpenAI } from './openai/openai'; +export { Inspect, type HealthInfo, type ProviderInfo, type RouteInfo, type VersionInfo } from './inspect'; +export { + Models, + type ListModelsResponse, + type Model, + type ModelListResponse, + type ModelRegisterParams, +} from './models'; export { PostTraining, + type AlgorithmConfig, + type ListPostTrainingJobsResponse, type PostTrainingJob, - type TrainingConfig, - type PostTrainingListJobsResponse, - type PostTrainingFineTuneSupervisedParams, - type PostTrainingOptimizePreferencesParams, + type PostTrainingPreferenceOptimizeParams, + type PostTrainingSupervisedFineTuneParams, } from './post-training/post-training'; -export { Providers, type ProviderInfo, type ProviderListResponse } from './providers'; +export { Providers, type ListProvidersResponse, type ProviderListResponse } from './providers'; export { - Safety, - type SafetyViolation, - type SafetyRunShieldResponse, - type SafetyRunShieldParams, -} from './safety'; + Responses, + type ResponseObject, + type ResponseObjectStream, + type ResponseListResponse, + type ResponseCreateParams, + type ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming, + type ResponseListParams, +} from './responses/responses'; +export { Routes, type ListRoutesResponse, type RouteListResponse } from './routes'; +export { Safety, type RunShieldResponse, type SafetyRunShieldParams } from './safety'; export { Scoring, type ScoringScoreResponse, @@ -87,47 +116,85 @@ export { } from './scoring'; export { ScoringFunctions, - type AggregationFunctionType, - type ParamType, + type ListScoringFunctionsResponse, type ScoringFn, type ScoringFnParams, - type ScoringFnParamsType, type ScoringFunctionListResponse, - type ScoringFunctionCreateParams, + type ScoringFunctionRegisterParams, } from './scoring-functions'; -export { Shields, type Shield, type ShieldListResponse, type ShieldCreateParams } from './shields'; +export { + Shields, + type ListShieldsResponse, + type Shield, + 
type ShieldListResponse, + type ShieldRegisterParams, +} from './shields'; export { SyntheticDataGeneration, - type SyntheticDataGenerationGenerateResponse, + type SyntheticDataGenerationResponse, type SyntheticDataGenerationGenerateParams, } from './synthetic-data-generation'; export { Telemetry, - type EventType, - type StructuredLogType, - type TelemetryCreateEventParams, -} from './telemetry/telemetry'; + type Event, + type QueryCondition, + type QuerySpansResponse, + type SpanWithStatus, + type Trace, + type TelemetryGetSpanResponse, + type TelemetryGetSpanTreeResponse, + type TelemetryQuerySpansResponse, + type TelemetryQueryTracesResponse, + type TelemetryGetSpanTreeParams, + type TelemetryLogEventParams, + type TelemetryQuerySpansParams, + type TelemetryQueryTracesParams, + type TelemetrySaveSpansToDatasetParams, +} from './telemetry'; export { ToolRuntime, type ToolDef, - type URL, - type ToolRuntimeInvokeToolResponse, + type ToolInvocationResult, type ToolRuntimeListToolsResponse, type ToolRuntimeInvokeToolParams, type ToolRuntimeListToolsParams, } from './tool-runtime/tool-runtime'; export { Toolgroups, + type ListToolGroupsResponse, type ToolGroup, type ToolgroupListResponse, type ToolgroupRegisterParams, } from './toolgroups'; -export { Tools, type Tool, type ToolParameter, type ToolListResponse, type ToolListParams } from './tools'; -export { VectorDBs, type VectorDB, type VectorDBListResponse, type VectorDBCreateParams } from './vector-dbs'; +export { + Tools, + type ListToolsResponse, + type Tool, + type ToolListResponse, + type ToolListParams, +} from './tools'; +export { + VectorDBs, + type ListVectorDBsResponse, + type VectorDBRetrieveResponse, + type VectorDBListResponse, + type VectorDBRegisterResponse, + type VectorDBRegisterParams, +} from './vector-dbs'; export { VectorIo, - type VectorIoQueryResponse, + type QueryChunksResponse, type VectorIoInsertParams, type VectorIoQueryParams, } from './vector-io'; -export { Version, type 
VersionRetrieveResponse } from './version'; +export { + VectorStores, + type ListVectorStoresResponse, + type VectorStore, + type VectorStoreDeleteResponse, + type VectorStoreSearchResponse, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, + type VectorStoreSearchParams, +} from './vector-stores/vector-stores'; diff --git a/src/resources/inference.ts b/src/resources/inference.ts index a361744..bdf6ee7 100644 --- a/src/resources/inference.ts +++ b/src/resources/inference.ts @@ -1,41 +1,83 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../core/resource'; -import * as ToolRuntimeAPI from './tool-runtime/tool-runtime'; -import * as TurnAPI from './agents/session/turn/turn'; -import { APIPromise } from '../core/api-promise'; -import { RequestOptions } from '../internal/request-options'; +import { APIResource } from '../resource'; +import { APIPromise } from '../core'; +import * as Core from '../core'; +import * as InferenceAPI from './inference'; +import * as Shared from './shared'; +import { Stream } from '../streaming'; export class Inference extends APIResource { + /** + * Generate chat completions for a batch of messages using the specified model. + */ batchChatCompletion( body: InferenceBatchChatCompletionParams, - options?: RequestOptions, - ): APIPromise { + options?: Core.RequestOptions, + ): Core.APIPromise { return this._client.post('/v1/inference/batch-chat-completion', { body, ...options }); } + /** + * Generate completions for a batch of content using the specified model. + */ batchCompletion( body: InferenceBatchCompletionParams, - options?: RequestOptions, - ): APIPromise { + options?: Core.RequestOptions, + ): Core.APIPromise { return this._client.post('/v1/inference/batch-completion', { body, ...options }); } /** * Generate a chat completion for the given messages using the specified model. 
*/ + chatCompletion( + body: InferenceChatCompletionParamsNonStreaming, + options?: Core.RequestOptions, + ): APIPromise; + chatCompletion( + body: InferenceChatCompletionParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + chatCompletion( + body: InferenceChatCompletionParamsBase, + options?: Core.RequestOptions, + ): APIPromise | Shared.ChatCompletionResponse>; chatCompletion( body: InferenceChatCompletionParams, - options?: RequestOptions, - ): APIPromise { - return this._client.post('/v1/inference/chat-completion', { body, ...options }); + options?: Core.RequestOptions, + ): APIPromise | APIPromise> { + return this._client.post('/v1/inference/chat-completion', { + body, + ...options, + stream: body.stream ?? false, + }) as APIPromise | APIPromise>; } /** * Generate a completion for the given content using the specified model. */ - completion(body: InferenceCompletionParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/inference/completion', { body, ...options }); + completion( + body: InferenceCompletionParamsNonStreaming, + options?: Core.RequestOptions, + ): APIPromise; + completion( + body: InferenceCompletionParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + completion( + body: InferenceCompletionParamsBase, + options?: Core.RequestOptions, + ): APIPromise | CompletionResponse>; + completion( + body: InferenceCompletionParams, + options?: Core.RequestOptions, + ): APIPromise | APIPromise> { + return this._client.post('/v1/inference/completion', { + body, + ...options, + stream: body.stream ?? false, + }) as APIPromise | APIPromise>; } /** @@ -43,315 +85,102 @@ export class Inference extends APIResource { */ embeddings( body: InferenceEmbeddingsParams, - options?: RequestOptions, - ): APIPromise { + options?: Core.RequestOptions, + ): Core.APIPromise { return this._client.post('/v1/inference/embeddings', { body, ...options }); } } /** - * Response from a chat completion request. 
- */ -export interface ChatCompletionResponse { - /** - * The complete response message - */ - completion_message: CompletionMessage; - - /** - * Optional log probabilities for generated tokens - */ - logprobs?: Array; - - metrics?: Array; -} - -/** - * A message containing the model's (assistant) response in a chat conversation. + * A chunk of a streamed chat completion response. */ -export interface CompletionMessage { - /** - * The content of the model's response - */ - content: InterleavedContent; - - /** - * Must be "assistant" to identify this as the model's response - */ - role: 'assistant'; - - /** - * Reason why the model stopped generating. Options are: - - * `StopReason.end_of_turn`: The model finished generating the entire response. - - * `StopReason.end_of_message`: The model finished generating but generated a - * partial response -- usually, a tool call. The user may call the tool and - * continue the conversation with the tool's response. - - * `StopReason.out_of_tokens`: The model ran out of token budget. - */ - stop_reason: 'end_of_turn' | 'end_of_message' | 'out_of_tokens'; - - /** - * List of tool calls. Each tool call is a ToolCall object. - */ - tool_calls?: Array; -} - -/** - * Response from a completion request. 
- */ -export interface CompletionResponse { - /** - * The generated completion text - */ - content: string; - - /** - * Reason why generation stopped - */ - stop_reason: 'end_of_turn' | 'end_of_message' | 'out_of_tokens'; - - /** - * Optional log probabilities for generated tokens - */ - logprobs?: Array; - - metrics?: Array; -} - -/** - * A image content item - */ -export type InterleavedContent = - | string - | InterleavedContent.ImageContentItem - | InterleavedContent.TextContentItem - | Array; - -export namespace InterleavedContent { - /** - * A image content item - */ - export interface ImageContentItem { - /** - * Image as a base64 encoded string or an URL - */ - image: ImageContentItem.Image; - - /** - * Discriminator type of the content item. Always "image" - */ - type: 'image'; - } - - export namespace ImageContentItem { - /** - * Image as a base64 encoded string or an URL - */ - export interface Image { - /** - * base64 encoded image data as string - */ - data?: string; - - /** - * A URL of the image or data URL in the format of data:image/{type};base64,{data}. - * Note that URL could have length limits. - */ - url?: ToolRuntimeAPI.URL; - } - } - +export interface ChatCompletionResponseStreamChunk { /** - * A text content item + * The event containing the new content */ - export interface TextContentItem { - /** - * Text content - */ - text: string; + event: ChatCompletionResponseStreamChunk.Event; - /** - * Discriminator type of the content item. 
Always "text" - */ - type: 'text'; - } + metrics?: Array; } -/** - * A image content item - */ -export type InterleavedContentItem = - | InterleavedContentItem.ImageContentItem - | InterleavedContentItem.TextContentItem; - -export namespace InterleavedContentItem { +export namespace ChatCompletionResponseStreamChunk { /** - * A image content item + * The event containing the new content */ - export interface ImageContentItem { - /** - * Image as a base64 encoded string or an URL - */ - image: ImageContentItem.Image; - + export interface Event { /** - * Discriminator type of the content item. Always "image" + * Content generated since last event. This can be one or more tokens, or a tool + * call. */ - type: 'image'; - } + delta: Shared.ContentDelta; - export namespace ImageContentItem { /** - * Image as a base64 encoded string or an URL + * Type of the event */ - export interface Image { - /** - * base64 encoded image data as string - */ - data?: string; - - /** - * A URL of the image or data URL in the format of data:image/{type};base64,{data}. - * Note that URL could have length limits. - */ - url?: ToolRuntimeAPI.URL; - } - } + event_type: 'start' | 'complete' | 'progress'; - /** - * A text content item - */ - export interface TextContentItem { /** - * Text content + * Optional log probabilities for generated tokens */ - text: string; + logprobs?: Array; /** - * Discriminator type of the content item. Always "text" + * Optional reason why generation stopped, if complete */ - type: 'text'; + stop_reason?: 'end_of_turn' | 'end_of_message' | 'out_of_tokens'; } -} - -/** - * A message from the user in a chat conversation. - */ -export type Message = TurnAPI.UserMessage | SystemMessage | TurnAPI.ToolResponseMessage | CompletionMessage; -export interface MetricInResponse { - metric: string; + export interface Metric { + metric: string; - value: number; + value: number; - unit?: string; -} - -/** - * Configuration for JSON schema-guided response generation. 
- */ -export type ResponseFormat = ResponseFormat.JsonSchemaResponseFormat | ResponseFormat.GrammarResponseFormat; - -export namespace ResponseFormat { - /** - * Configuration for JSON schema-guided response generation. - */ - export interface JsonSchemaResponseFormat { - /** - * The JSON schema the response should conform to. In a Python SDK, this is often a - * `pydantic` model. - */ - json_schema: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * Must be "json_schema" to identify this format type - */ - type: 'json_schema'; - } - - /** - * Configuration for grammar-guided response generation. - */ - export interface GrammarResponseFormat { - /** - * The BNF grammar specification the response should conform to - */ - bnf: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * Must be "grammar" to identify this format type - */ - type: 'grammar'; + unit?: string; } } /** - * Sampling parameters. + * Response from a completion request. */ -export interface SamplingParams { +export interface CompletionResponse { /** - * The sampling strategy. + * The generated completion text */ - strategy: - | SamplingParams.GreedySamplingStrategy - | SamplingParams.TopPSamplingStrategy - | SamplingParams.TopKSamplingStrategy; + content: string; /** - * The maximum number of tokens that can be generated in the completion. The token - * count of your prompt plus max_tokens cannot exceed the model's context length. + * Reason why generation stopped */ - max_tokens?: number; + stop_reason: 'end_of_turn' | 'end_of_message' | 'out_of_tokens'; /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on - * whether they appear in the text so far, increasing the model's likelihood to - * talk about new topics. + * Optional log probabilities for generated tokens */ - repetition_penalty?: number; + logprobs?: Array; - /** - * Up to 4 sequences where the API will stop generating further tokens. 
The - * returned text will not contain the stop sequence. - */ - stop?: Array; + metrics?: Array; } -export namespace SamplingParams { - export interface GreedySamplingStrategy { - type: 'greedy'; - } - - export interface TopPSamplingStrategy { - type: 'top_p'; +export namespace CompletionResponse { + export interface Metric { + metric: string; - temperature?: number; + value: number; - top_p?: number; - } - - export interface TopKSamplingStrategy { - top_k: number; - - type: 'top_k'; + unit?: string; } } /** - * A system message providing instructions or context to the model. + * Response containing generated embeddings. */ -export interface SystemMessage { +export interface EmbeddingsResponse { /** - * The content of the "system prompt". If multiple system messages are provided, - * they are concatenated. The underlying Llama Stack code may also add other system - * messages (for example, for formatting tool definitions). - */ - content: InterleavedContent; - - /** - * Must be "system" to identify this as a system message + * List of embedding vectors, one per input content. Each embedding is a list of + * floats. The dimensionality of the embedding is model-specific; you can check + * model metadata using /models/{model_id} */ - role: 'system'; + embeddings: Array>; } /** @@ -364,150 +193,135 @@ export interface TokenLogProbs { logprobs_by_token: { [key: string]: number }; } -export interface ToolCall { - arguments: - | string - | { - [key: string]: - | string - | number - | boolean - | Array - | { [key: string]: string | number | boolean | null } - | null; - }; - - call_id: string; - - tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); - - arguments_json?: string; +export interface InferenceBatchChatCompletionResponse { + batch: Array; } -/** - * Configuration for tool use. 
- */ -export interface ToolConfig { +export interface InferenceBatchChatCompletionParams { /** - * (Optional) Config for how to override the default system prompt. - - * `SystemMessageBehavior.append`: Appends the provided system message to the - * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default - * system prompt with the provided system message. The system message can include - * the string '{{function_definitions}}' to indicate where the function definitions - * should be inserted. + * The messages to generate completions for. */ - system_message_behavior?: 'append' | 'replace'; + messages_batch: Array>; /** - * (Optional) Whether tool use is automatic, required, or none. Can also specify a - * tool name to use a specific tool. Defaults to ToolChoice.auto. + * The identifier of the model to use. The model must be registered with Llama + * Stack and available via the /models endpoint. */ - tool_choice?: 'auto' | 'required' | 'none' | (string & {}); + model_id: string; /** - * (Optional) Instructs the model how to format tool calls. By default, Llama Stack - * will attempt to use a format that is best adapted to the model. - - * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - - * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a - * tag. - `ToolPromptFormat.python_list`: The tool calls - * are output as Python syntax -- a list of function calls. + * (Optional) If specified, log probabilities for each token position will be + * returned. 
*/ - tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; -} - -export interface ToolDefinition { - tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); - - description?: string; - - parameters?: { [key: string]: ToolDefinition.Parameters }; -} - -export namespace ToolDefinition { - export interface Parameters { - param_type: string; - - default?: boolean | number | string | Array | unknown | null; - - description?: string; - - required?: boolean; - } -} - -export interface InferenceBatchChatCompletionResponse { - batch: Array; -} - -export interface InferenceBatchCompletionResponse { - batch: Array; -} + logprobs?: InferenceBatchChatCompletionParams.Logprobs; -/** - * Response containing generated embeddings. - */ -export interface InferenceEmbeddingsResponse { /** - * List of embedding vectors, one per input content. Each embedding is a list of - * floats. The dimensionality of the embedding is model-specific; you can check - * model metadata using /models/{model_id} + * (Optional) Grammar specification for guided (structured) decoding. */ - embeddings: Array>; -} - -export interface InferenceBatchChatCompletionParams { - messages_batch: Array>; - - model_id: string; - - logprobs?: InferenceBatchChatCompletionParams.Logprobs; + response_format?: Shared.ResponseFormat; /** - * Configuration for JSON schema-guided response generation. + * (Optional) Parameters to control the sampling strategy. */ - response_format?: ResponseFormat; + sampling_params?: Shared.SamplingParams; /** - * Sampling parameters. + * (Optional) Configuration for tool use. */ - sampling_params?: SamplingParams; + tool_config?: InferenceBatchChatCompletionParams.ToolConfig; /** - * Configuration for tool use. + * (Optional) List of tool definitions available to the model. 
*/ - tool_config?: ToolConfig; - - tools?: Array; + tools?: Array; } export namespace InferenceBatchChatCompletionParams { + /** + * (Optional) If specified, log probabilities for each token position will be + * returned. + */ export interface Logprobs { /** * How many tokens (for each position) to return log probabilities for. */ top_k?: number; } + + /** + * (Optional) Configuration for tool use. + */ + export interface ToolConfig { + /** + * (Optional) Config for how to override the default system prompt. - + * `SystemMessageBehavior.append`: Appends the provided system message to the + * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default + * system prompt with the provided system message. The system message can include + * the string '{{function_definitions}}' to indicate where the function definitions + * should be inserted. + */ + system_message_behavior?: 'append' | 'replace'; + + /** + * (Optional) Whether tool use is automatic, required, or none. Can also specify a + * tool name to use a specific tool. Defaults to ToolChoice.auto. + */ + tool_choice?: 'auto' | 'required' | 'none' | (string & {}); + + /** + * (Optional) Instructs the model how to format tool calls. By default, Llama Stack + * will attempt to use a format that is best adapted to the model. - + * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - + * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a + * tag. - `ToolPromptFormat.python_list`: The tool calls + * are output as Python syntax -- a list of function calls. + */ + tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; + } + + export interface Tool { + tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); + + description?: string; + + parameters?: { [key: string]: Shared.ToolParamDefinition }; + } } export interface InferenceBatchCompletionParams { - content_batch: Array; + /** + * The content to generate completions for. 
+ */ + content_batch: Array; + /** + * The identifier of the model to use. The model must be registered with Llama + * Stack and available via the /models endpoint. + */ model_id: string; + /** + * (Optional) If specified, log probabilities for each token position will be + * returned. + */ logprobs?: InferenceBatchCompletionParams.Logprobs; /** - * Configuration for JSON schema-guided response generation. + * (Optional) Grammar specification for guided (structured) decoding. */ - response_format?: ResponseFormat; + response_format?: Shared.ResponseFormat; /** - * Sampling parameters. + * (Optional) Parameters to control the sampling strategy. */ - sampling_params?: SamplingParams; + sampling_params?: Shared.SamplingParams; } export namespace InferenceBatchCompletionParams { + /** + * (Optional) If specified, log probabilities for each token position will be + * returned. + */ export interface Logprobs { /** * How many tokens (for each position) to return log probabilities for. @@ -516,11 +330,15 @@ export namespace InferenceBatchCompletionParams { } } -export interface InferenceChatCompletionParams { +export type InferenceChatCompletionParams = + | InferenceChatCompletionParamsNonStreaming + | InferenceChatCompletionParamsStreaming; + +export interface InferenceChatCompletionParamsBase { /** - * List of messages in the conversation + * List of messages in the conversation. */ - messages: Array; + messages: Array; /** * The identifier of the model to use. The model must be registered with Llama @@ -540,12 +358,12 @@ export interface InferenceChatCompletionParams { * providers support this format. - `ResponseFormat.grammar`: The grammar is a BNF * grammar. This format is more flexible, but not all providers support it. */ - response_format?: ResponseFormat; + response_format?: Shared.ResponseFormat; /** - * Parameters to control the sampling strategy + * Parameters to control the sampling strategy. 
*/ - sampling_params?: SamplingParams; + sampling_params?: Shared.SamplingParams; /** * (Optional) If True, generate an SSE event stream of the response. Defaults to @@ -562,7 +380,7 @@ export interface InferenceChatCompletionParams { /** * (Optional) Configuration for tool use. */ - tool_config?: ToolConfig; + tool_config?: InferenceChatCompletionParams.ToolConfig; /** * (Optional) Instructs the model how to format tool calls. By default, Llama Stack @@ -576,9 +394,9 @@ export interface InferenceChatCompletionParams { tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; /** - * (Optional) List of tool definitions available to the model + * (Optional) List of tool definitions available to the model. */ - tools?: Array; + tools?: Array; } export namespace InferenceChatCompletionParams { @@ -592,13 +410,76 @@ export namespace InferenceChatCompletionParams { */ top_k?: number; } + + /** + * (Optional) Configuration for tool use. + */ + export interface ToolConfig { + /** + * (Optional) Config for how to override the default system prompt. - + * `SystemMessageBehavior.append`: Appends the provided system message to the + * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default + * system prompt with the provided system message. The system message can include + * the string '{{function_definitions}}' to indicate where the function definitions + * should be inserted. + */ + system_message_behavior?: 'append' | 'replace'; + + /** + * (Optional) Whether tool use is automatic, required, or none. Can also specify a + * tool name to use a specific tool. Defaults to ToolChoice.auto. + */ + tool_choice?: 'auto' | 'required' | 'none' | (string & {}); + + /** + * (Optional) Instructs the model how to format tool calls. By default, Llama Stack + * will attempt to use a format that is best adapted to the model. - + * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. 
- + * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a + * tag. - `ToolPromptFormat.python_list`: The tool calls + * are output as Python syntax -- a list of function calls. + */ + tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; + } + + export interface Tool { + tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); + + description?: string; + + parameters?: { [key: string]: Shared.ToolParamDefinition }; + } + + export type InferenceChatCompletionParamsNonStreaming = + InferenceAPI.InferenceChatCompletionParamsNonStreaming; + export type InferenceChatCompletionParamsStreaming = InferenceAPI.InferenceChatCompletionParamsStreaming; } -export interface InferenceCompletionParams { +export interface InferenceChatCompletionParamsNonStreaming extends InferenceChatCompletionParamsBase { /** - * The content to generate a completion for + * (Optional) If True, generate an SSE event stream of the response. Defaults to + * False. */ - content: InterleavedContent; + stream?: false; +} + +export interface InferenceChatCompletionParamsStreaming extends InferenceChatCompletionParamsBase { + /** + * (Optional) If True, generate an SSE event stream of the response. Defaults to + * False. + */ + stream: true; +} + +export type InferenceCompletionParams = + | InferenceCompletionParamsNonStreaming + | InferenceCompletionParamsStreaming; + +export interface InferenceCompletionParamsBase { + /** + * The content to generate a completion for. + */ + content: Shared.InterleavedContent; /** * The identifier of the model to use. The model must be registered with Llama @@ -613,14 +494,14 @@ export interface InferenceCompletionParams { logprobs?: InferenceCompletionParams.Logprobs; /** - * (Optional) Grammar specification for guided (structured) decoding + * (Optional) Grammar specification for guided (structured) decoding. 
*/ - response_format?: ResponseFormat; + response_format?: Shared.ResponseFormat; /** - * (Optional) Parameters to control the sampling strategy + * (Optional) Parameters to control the sampling strategy. */ - sampling_params?: SamplingParams; + sampling_params?: Shared.SamplingParams; /** * (Optional) If True, generate an SSE event stream of the response. Defaults to @@ -640,6 +521,25 @@ export namespace InferenceCompletionParams { */ top_k?: number; } + + export type InferenceCompletionParamsNonStreaming = InferenceAPI.InferenceCompletionParamsNonStreaming; + export type InferenceCompletionParamsStreaming = InferenceAPI.InferenceCompletionParamsStreaming; +} + +export interface InferenceCompletionParamsNonStreaming extends InferenceCompletionParamsBase { + /** + * (Optional) If True, generate an SSE event stream of the response. Defaults to + * False. + */ + stream?: false; +} + +export interface InferenceCompletionParamsStreaming extends InferenceCompletionParamsBase { + /** + * (Optional) If True, generate an SSE event stream of the response. Defaults to + * False. + */ + stream: true; } export interface InferenceEmbeddingsParams { @@ -648,7 +548,7 @@ export interface InferenceEmbeddingsParams { * InterleavedContentItem (and hence can be multimodal). The behavior depends on * the model and provider. Some models may only support text. */ - contents: Array | Array; + contents: Array | Array; /** * The identifier of the model to use. 
The model must be an embedding model @@ -677,27 +577,19 @@ export interface InferenceEmbeddingsParams { export declare namespace Inference { export { - type ChatCompletionResponse as ChatCompletionResponse, - type CompletionMessage as CompletionMessage, + type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk, type CompletionResponse as CompletionResponse, - type InterleavedContent as InterleavedContent, - type InterleavedContentItem as InterleavedContentItem, - type Message as Message, - type MetricInResponse as MetricInResponse, - type ResponseFormat as ResponseFormat, - type SamplingParams as SamplingParams, - type SystemMessage as SystemMessage, + type EmbeddingsResponse as EmbeddingsResponse, type TokenLogProbs as TokenLogProbs, - type ToolCall as ToolCall, - type ToolConfig as ToolConfig, - type ToolDefinition as ToolDefinition, type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse, - type InferenceBatchCompletionResponse as InferenceBatchCompletionResponse, - type InferenceEmbeddingsResponse as InferenceEmbeddingsResponse, type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams, type InferenceBatchCompletionParams as InferenceBatchCompletionParams, type InferenceChatCompletionParams as InferenceChatCompletionParams, + type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming, + type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming, type InferenceCompletionParams as InferenceCompletionParams, + type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming, + type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming, type InferenceEmbeddingsParams as InferenceEmbeddingsParams, }; } diff --git a/src/resources/inspect.ts b/src/resources/inspect.ts index e0c46f6..b84d8f2 100644 --- a/src/resources/inspect.ts +++ b/src/resources/inspect.ts @@ -1,29 +1,57 @@ // File generated from our 
OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../core/resource'; -import { APIPromise } from '../core/api-promise'; -import { RequestOptions } from '../internal/request-options'; +import { APIResource } from '../resource'; +import * as Core from '../core'; export class Inspect extends APIResource { - listRoutes(options?: RequestOptions): APIPromise { - return this._client.get('/v1/inspect/routes', options); + /** + * Get the health of the service. + */ + health(options?: Core.RequestOptions): Core.APIPromise { + return this._client.get('/v1/health', options); + } + + /** + * Get the version of the service. + */ + version(options?: Core.RequestOptions): Core.APIPromise { + return this._client.get('/v1/version', options); } } -export interface InspectListRoutesResponse { - data: Array; +export interface HealthInfo { + status: 'OK' | 'Error' | 'Not Implemented'; } -export namespace InspectListRoutesResponse { - export interface Data { - method: string; +export interface ProviderInfo { + api: string; - provider_types: Array; + config: { [key: string]: boolean | number | string | Array | unknown | null }; - route: string; - } + health: { [key: string]: boolean | number | string | Array | unknown | null }; + + provider_id: string; + + provider_type: string; +} + +export interface RouteInfo { + method: string; + + provider_types: Array; + + route: string; +} + +export interface VersionInfo { + version: string; } export declare namespace Inspect { - export { type InspectListRoutesResponse as InspectListRoutesResponse }; + export { + type HealthInfo as HealthInfo, + type ProviderInfo as ProviderInfo, + type RouteInfo as RouteInfo, + type VersionInfo as VersionInfo, + }; } diff --git a/src/resources/models.ts b/src/resources/models.ts index 6117263..a6d5474 100644 --- a/src/resources/models.ts +++ b/src/resources/models.ts @@ -1,38 +1,53 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../core/resource'; -import { APIPromise } from '../core/api-promise'; -import { buildHeaders } from '../internal/headers'; -import { RequestOptions } from '../internal/request-options'; -import { path } from '../internal/utils/path'; +import { APIResource } from '../resource'; +import * as Core from '../core'; export class Models extends APIResource { - create(body: ModelCreateParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/models', { body, ...options }); + /** + * Get a model by its identifier. + */ + retrieve(modelId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/models/${modelId}`, options); } - retrieve(modelID: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/models/${modelID}`, options); + /** + * List all models. + */ + list(options?: Core.RequestOptions): Core.APIPromise { + return ( + this._client.get('/v1/models', options) as Core.APIPromise<{ data: ModelListResponse }> + )._thenUnwrap((obj) => obj.data); } - list(options?: RequestOptions): APIPromise { - return this._client.get('/v1/models', options); + /** + * Register a model. + */ + register(body: ModelRegisterParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/v1/models', { body, ...options }); } - delete(modelID: string, options?: RequestOptions): APIPromise { - return this._client.delete(path`/v1/models/${modelID}`, { + /** + * Unregister a model. 
+ */ + unregister(modelId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1/models/${modelId}`, { ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), + headers: { Accept: '*/*', ...options?.headers }, }); } } +export interface ListModelsResponse { + data: ModelListResponse; +} + export interface Model { identifier: string; metadata: { [key: string]: boolean | number | string | Array | unknown | null }; - model_type: ModelType; + model_type: 'llm' | 'embedding'; provider_id: string; @@ -41,29 +56,40 @@ export interface Model { provider_resource_id?: string; } -export type ModelType = 'llm' | 'embedding'; - -export interface ModelListResponse { - data: Array; -} +export type ModelListResponse = Array; -export interface ModelCreateParams { +export interface ModelRegisterParams { + /** + * The identifier of the model to register. + */ model_id: string; + /** + * Any additional metadata for this model. + */ metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; - model_type?: ModelType; + /** + * The type of model to register. + */ + model_type?: 'llm' | 'embedding'; + /** + * The identifier of the provider. + */ provider_id?: string; + /** + * The identifier of the model in the provider. + */ provider_model_id?: string; } export declare namespace Models { export { + type ListModelsResponse as ListModelsResponse, type Model as Model, - type ModelType as ModelType, type ModelListResponse as ModelListResponse, - type ModelCreateParams as ModelCreateParams, + type ModelRegisterParams as ModelRegisterParams, }; } diff --git a/src/resources/openai/index.ts b/src/resources/openai/index.ts deleted file mode 100644 index c86b754..0000000 --- a/src/resources/openai/index.ts +++ /dev/null @@ -1,11 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -export { OpenAI } from './openai'; -export { - V1, - type ChoiceLogprobs, - type TokenLogProb, - type V1GenerateCompletionResponse, - type V1ListModelsResponse, - type V1GenerateCompletionParams, -} from './v1/index'; diff --git a/src/resources/openai/openai.ts b/src/resources/openai/openai.ts deleted file mode 100644 index 15435e5..0000000 --- a/src/resources/openai/openai.ts +++ /dev/null @@ -1,29 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../../core/resource'; -import * as V1API from './v1/v1'; -import { - ChoiceLogprobs, - TokenLogProb, - V1, - V1GenerateCompletionParams, - V1GenerateCompletionResponse, - V1ListModelsResponse, -} from './v1/v1'; - -export class OpenAI extends APIResource { - v1: V1API.V1 = new V1API.V1(this._client); -} - -OpenAI.V1 = V1; - -export declare namespace OpenAI { - export { - V1 as V1, - type ChoiceLogprobs as ChoiceLogprobs, - type TokenLogProb as TokenLogProb, - type V1GenerateCompletionResponse as V1GenerateCompletionResponse, - type V1ListModelsResponse as V1ListModelsResponse, - type V1GenerateCompletionParams as V1GenerateCompletionParams, - }; -} diff --git a/src/resources/openai/v1/chat.ts b/src/resources/openai/v1/chat.ts deleted file mode 100644 index 72c2140..0000000 --- a/src/resources/openai/v1/chat.ts +++ /dev/null @@ -1,494 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../../../core/resource'; -import * as ChatAPI from './chat'; -import * as V1API from './v1'; -import { APIPromise } from '../../../core/api-promise'; -import { RequestOptions } from '../../../internal/request-options'; - -export class Chat extends APIResource { - /** - * Generate an OpenAI-compatible chat completion for the given messages using the - * specified model. 
- */ - generateCompletion( - body: ChatGenerateCompletionParams, - options?: RequestOptions, - ): APIPromise { - return this._client.post('/v1/openai/v1/chat/completions', { body, ...options }); - } -} - -export type ChatCompletionContentPart = - | ChatCompletionContentPart.OpenAIChatCompletionContentPartTextParam - | ChatCompletionContentPart.OpenAIChatCompletionContentPartImageParam; - -export namespace ChatCompletionContentPart { - export interface OpenAIChatCompletionContentPartTextParam { - text: string; - - type: 'text'; - } - - export interface OpenAIChatCompletionContentPartImageParam { - image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; - - type: 'image_url'; - } - - export namespace OpenAIChatCompletionContentPartImageParam { - export interface ImageURL { - url: string; - - detail?: string; - } - } -} - -export interface ChatCompletionToolCall { - type: 'function'; - - id?: string; - - function?: ChatCompletionToolCall.Function; - - index?: number; -} - -export namespace ChatCompletionToolCall { - export interface Function { - arguments?: string; - - name?: string; - } -} - -/** - * A message from the user in an OpenAI-compatible chat completion request. - */ -export type MessageParam = - | MessageParam.OpenAIUserMessageParam - | MessageParam.OpenAISystemMessageParam - | MessageParam.OpenAIAssistantMessageParam - | MessageParam.OpenAIToolMessageParam - | MessageParam.OpenAIDeveloperMessageParam; - -export namespace MessageParam { - /** - * A message from the user in an OpenAI-compatible chat completion request. - */ - export interface OpenAIUserMessageParam { - /** - * The content of the message, which can include text and other media - */ - content: string | Array; - - /** - * Must be "user" to identify this as a user message - */ - role: 'user'; - - /** - * (Optional) The name of the user message participant. - */ - name?: string; - } - - /** - * A system message providing instructions or context to the model. 
- */ - export interface OpenAISystemMessageParam { - /** - * The content of the "system prompt". If multiple system messages are provided, - * they are concatenated. The underlying Llama Stack code may also add other system - * messages (for example, for formatting tool definitions). - */ - content: string | Array; - - /** - * Must be "system" to identify this as a system message - */ - role: 'system'; - - /** - * (Optional) The name of the system message participant. - */ - name?: string; - } - - /** - * A message containing the model's (assistant) response in an OpenAI-compatible - * chat completion request. - */ - export interface OpenAIAssistantMessageParam { - /** - * Must be "assistant" to identify this as the model's response - */ - role: 'assistant'; - - /** - * The content of the model's response - */ - content?: string | Array; - - /** - * (Optional) The name of the assistant message participant. - */ - name?: string; - - /** - * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object. - */ - tool_calls?: Array; - } - - /** - * A message representing the result of a tool invocation in an OpenAI-compatible - * chat completion request. - */ - export interface OpenAIToolMessageParam { - /** - * The response content from the tool - */ - content: string | Array; - - /** - * Must be "tool" to identify this as a tool response - */ - role: 'tool'; - - /** - * Unique identifier for the tool call this response is for - */ - tool_call_id: string; - } - - /** - * A message from the developer in an OpenAI-compatible chat completion request. - */ - export interface OpenAIDeveloperMessageParam { - /** - * The content of the developer message - */ - content: string | Array; - - /** - * Must be "developer" to identify this as a developer message - */ - role: 'developer'; - - /** - * (Optional) The name of the developer message participant. - */ - name?: string; - } -} - -/** - * Response from an OpenAI-compatible chat completion request. 
- */ -export type ChatGenerateCompletionResponse = - | ChatGenerateCompletionResponse.OpenAIChatCompletion - | ChatGenerateCompletionResponse.OpenAIChatCompletionChunk; - -export namespace ChatGenerateCompletionResponse { - /** - * Response from an OpenAI-compatible chat completion request. - */ - export interface OpenAIChatCompletion { - /** - * The ID of the chat completion - */ - id: string; - - /** - * List of choices - */ - choices: Array; - - /** - * The Unix timestamp in seconds when the chat completion was created - */ - created: number; - - /** - * The model that was used to generate the chat completion - */ - model: string; - - /** - * The object type, which will be "chat.completion" - */ - object: 'chat.completion'; - } - - export namespace OpenAIChatCompletion { - /** - * A choice from an OpenAI-compatible chat completion response. - */ - export interface Choice { - /** - * The reason the model stopped generating - */ - finish_reason: string; - - /** - * The index of the choice - */ - index: number; - - /** - * The message from the model - */ - message: ChatAPI.MessageParam; - - /** - * (Optional) The log probabilities for the tokens in the message - */ - logprobs?: V1API.ChoiceLogprobs; - } - } - - /** - * Chunk from a streaming response to an OpenAI-compatible chat completion request. - */ - export interface OpenAIChatCompletionChunk { - /** - * The ID of the chat completion - */ - id: string; - - /** - * List of choices - */ - choices: Array; - - /** - * The Unix timestamp in seconds when the chat completion was created - */ - created: number; - - /** - * The model that was used to generate the chat completion - */ - model: string; - - /** - * The object type, which will be "chat.completion.chunk" - */ - object: 'chat.completion.chunk'; - } - - export namespace OpenAIChatCompletionChunk { - /** - * A chunk choice from an OpenAI-compatible chat completion streaming response. 
- */ - export interface Choice { - /** - * The delta from the chunk - */ - delta: Choice.Delta; - - /** - * The reason the model stopped generating - */ - finish_reason: string; - - /** - * The index of the choice - */ - index: number; - - /** - * (Optional) The log probabilities for the tokens in the message - */ - logprobs?: V1API.ChoiceLogprobs; - } - - export namespace Choice { - /** - * The delta from the chunk - */ - export interface Delta { - /** - * (Optional) The content of the delta - */ - content?: string; - - /** - * (Optional) The refusal of the delta - */ - refusal?: string; - - /** - * (Optional) The role of the delta - */ - role?: string; - - /** - * (Optional) The tool calls of the delta - */ - tool_calls?: Array; - } - } - } -} - -export interface ChatGenerateCompletionParams { - /** - * List of messages in the conversation - */ - messages: Array; - - /** - * The identifier of the model to use. The model must be registered with Llama - * Stack and available via the /models endpoint. 
- */ - model: string; - - /** - * (Optional) The penalty for repeated tokens - */ - frequency_penalty?: number; - - /** - * (Optional) The function call to use - */ - function_call?: string | { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * (Optional) List of functions to use - */ - functions?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - /** - * (Optional) The logit bias to use - */ - logit_bias?: { [key: string]: number }; - - /** - * (Optional) The log probabilities to use - */ - logprobs?: boolean; - - /** - * (Optional) The maximum number of tokens to generate - */ - max_completion_tokens?: number; - - /** - * (Optional) The maximum number of tokens to generate - */ - max_tokens?: number; - - /** - * (Optional) The number of completions to generate - */ - n?: number; - - /** - * (Optional) Whether to parallelize tool calls - */ - parallel_tool_calls?: boolean; - - /** - * (Optional) The penalty for repeated tokens - */ - presence_penalty?: number; - - /** - * (Optional) The response format to use - */ - response_format?: - | ChatGenerateCompletionParams.OpenAIResponseFormatText - | ChatGenerateCompletionParams.OpenAIResponseFormatJsonSchema - | ChatGenerateCompletionParams.OpenAIResponseFormatJsonObject; - - /** - * (Optional) The seed to use - */ - seed?: number; - - /** - * (Optional) The stop tokens to use - */ - stop?: string | Array; - - /** - * (Optional) Whether to stream the response - */ - stream?: boolean; - - /** - * (Optional) The stream options to use - */ - stream_options?: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * (Optional) The temperature to use - */ - temperature?: number; - - /** - * (Optional) The tool choice to use - */ - tool_choice?: string | { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * (Optional) The tools to use - */ - tools?: Array<{ [key: string]: boolean | number | string | Array | 
unknown | null }>; - - /** - * (Optional) The top log probabilities to use - */ - top_logprobs?: number; - - /** - * (Optional) The top p to use - */ - top_p?: number; - - /** - * (Optional) The user to use - */ - user?: string; -} - -export namespace ChatGenerateCompletionParams { - export interface OpenAIResponseFormatText { - type: 'text'; - } - - export interface OpenAIResponseFormatJsonSchema { - json_schema: OpenAIResponseFormatJsonSchema.JsonSchema; - - type: 'json_schema'; - } - - export namespace OpenAIResponseFormatJsonSchema { - export interface JsonSchema { - name: string; - - description?: string; - - schema?: { [key: string]: boolean | number | string | Array | unknown | null }; - - strict?: boolean; - } - } - - export interface OpenAIResponseFormatJsonObject { - type: 'json_object'; - } -} - -export declare namespace Chat { - export { - type ChatCompletionContentPart as ChatCompletionContentPart, - type ChatCompletionToolCall as ChatCompletionToolCall, - type MessageParam as MessageParam, - type ChatGenerateCompletionResponse as ChatGenerateCompletionResponse, - type ChatGenerateCompletionParams as ChatGenerateCompletionParams, - }; -} diff --git a/src/resources/openai/v1/index.ts b/src/resources/openai/v1/index.ts deleted file mode 100644 index 39f2e0e..0000000 --- a/src/resources/openai/v1/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -export { - Chat, - type ChatCompletionContentPart, - type ChatCompletionToolCall, - type MessageParam, - type ChatGenerateCompletionResponse, - type ChatGenerateCompletionParams, -} from './chat'; -export { Responses, type OpenAIResponse, type ResponseCreateParams } from './responses'; -export { - V1, - type ChoiceLogprobs, - type TokenLogProb, - type V1GenerateCompletionResponse, - type V1ListModelsResponse, - type V1GenerateCompletionParams, -} from './v1'; diff --git a/src/resources/openai/v1/responses.ts b/src/resources/openai/v1/responses.ts deleted file mode 100644 index e2b39a6..0000000 --- a/src/resources/openai/v1/responses.ts +++ /dev/null @@ -1,156 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../../../core/resource'; -import { APIPromise } from '../../../core/api-promise'; -import { RequestOptions } from '../../../internal/request-options'; -import { path } from '../../../internal/utils/path'; - -export class Responses extends APIResource { - /** - * Create a new OpenAI response. - */ - create(body: ResponseCreateParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/openai/v1/responses', { body, ...options }); - } - - /** - * Retrieve an OpenAI response by its ID. 
- */ - retrieve(id: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/openai/v1/responses/${id}`, options); - } -} - -export interface OpenAIResponse { - id: string; - - created_at: number; - - model: string; - - object: 'response'; - - output: Array< - OpenAIResponse.OpenAIResponseOutputMessage | OpenAIResponse.OpenAIResponseOutputMessageWebSearchToolCall - >; - - parallel_tool_calls: boolean; - - status: string; - - error?: OpenAIResponse.Error; - - previous_response_id?: string; - - temperature?: number; - - top_p?: number; - - truncation?: string; - - user?: string; -} - -export namespace OpenAIResponse { - export interface OpenAIResponseOutputMessage { - id: string; - - content: Array; - - role: 'assistant'; - - status: string; - - type: 'message'; - } - - export namespace OpenAIResponseOutputMessage { - export interface Content { - text: string; - - type: 'output_text'; - } - } - - export interface OpenAIResponseOutputMessageWebSearchToolCall { - id: string; - - status: string; - - type: 'web_search_call'; - } - - export interface Error { - code: string; - - message: string; - } -} - -export interface ResponseCreateParams { - /** - * Input message(s) to create the response. - */ - input: string | Array; - - /** - * The underlying LLM used for completions. - */ - model: string; - - /** - * (Optional) if specified, the new response will be a continuation of the previous - * response. This can be used to easily fork-off new responses from existing - * responses. 
- */ - previous_response_id?: string; - - store?: boolean; - - stream?: boolean; - - temperature?: number; - - tools?: Array; -} - -export namespace ResponseCreateParams { - export interface UnionMember1 { - content: - | string - | Array< - | UnionMember1.OpenAIResponseInputMessageContentText - | UnionMember1.OpenAIResponseInputMessageContentImage - >; - - role: 'system' | 'developer' | 'user' | 'assistant'; - - type?: 'message'; - } - - export namespace UnionMember1 { - export interface OpenAIResponseInputMessageContentText { - text: string; - - type: 'input_text'; - } - - export interface OpenAIResponseInputMessageContentImage { - detail: 'low' | 'high' | 'auto'; - - type: 'input_image'; - - image_url?: string; - } - } - - export interface Tool { - type: 'web_search' | 'web_search_preview_2025_03_11'; - - search_context_size?: string; - } -} - -export declare namespace Responses { - export { type OpenAIResponse as OpenAIResponse, type ResponseCreateParams as ResponseCreateParams }; -} diff --git a/src/resources/openai/v1/v1.ts b/src/resources/openai/v1/v1.ts deleted file mode 100644 index eeca37f..0000000 --- a/src/resources/openai/v1/v1.ts +++ /dev/null @@ -1,254 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { APIResource } from '../../../core/resource'; -import * as V1API from './v1'; -import * as ChatAPI from './chat'; -import { - Chat, - ChatCompletionContentPart, - ChatCompletionToolCall, - ChatGenerateCompletionParams, - ChatGenerateCompletionResponse, - MessageParam, -} from './chat'; -import * as ResponsesAPI from './responses'; -import { OpenAIResponse, ResponseCreateParams, Responses } from './responses'; -import { APIPromise } from '../../../core/api-promise'; -import { RequestOptions } from '../../../internal/request-options'; - -export class V1 extends APIResource { - responses: ResponsesAPI.Responses = new ResponsesAPI.Responses(this._client); - chat: ChatAPI.Chat = new ChatAPI.Chat(this._client); - - /** - * Generate an OpenAI-compatible completion for the given prompt using the - * specified model. - */ - generateCompletion( - body: V1GenerateCompletionParams, - options?: RequestOptions, - ): APIPromise { - return this._client.post('/v1/openai/v1/completions', { body, ...options }); - } - - listModels(options?: RequestOptions): APIPromise { - return this._client.get('/v1/openai/v1/models', options); - } -} - -/** - * The log probabilities for the tokens in the message from an OpenAI-compatible - * chat completion response. - */ -export interface ChoiceLogprobs { - /** - * (Optional) The log probabilities for the tokens in the message - */ - content?: Array; - - /** - * (Optional) The log probabilities for the tokens in the message - */ - refusal?: Array; -} - -/** - * The log probability for a token from an OpenAI-compatible chat completion - * response. - */ -export interface TokenLogProb { - token: string; - - logprob: number; - - top_logprobs: Array; - - bytes?: Array; -} - -export namespace TokenLogProb { - /** - * The top log probability for a token from an OpenAI-compatible chat completion - * response. 
- */ - export interface TopLogprob { - token: string; - - logprob: number; - - bytes?: Array; - } -} - -/** - * Response from an OpenAI-compatible completion request. - */ -export interface V1GenerateCompletionResponse { - id: string; - - choices: Array; - - created: number; - - model: string; - - object: 'text_completion'; -} - -export namespace V1GenerateCompletionResponse { - /** - * A choice from an OpenAI-compatible completion response. - */ - export interface Choice { - finish_reason: string; - - index: number; - - text: string; - - /** - * The log probabilities for the tokens in the message from an OpenAI-compatible - * chat completion response. - */ - logprobs?: V1API.ChoiceLogprobs; - } -} - -export interface V1ListModelsResponse { - data: Array; -} - -export namespace V1ListModelsResponse { - /** - * A model from OpenAI. - */ - export interface Data { - id: string; - - created: number; - - object: 'model'; - - owned_by: string; - } -} - -export interface V1GenerateCompletionParams { - /** - * The identifier of the model to use. The model must be registered with Llama - * Stack and available via the /models endpoint. 
- */ - model: string; - - /** - * The prompt to generate a completion for - */ - prompt: string | Array | Array | Array>; - - /** - * (Optional) The number of completions to generate - */ - best_of?: number; - - /** - * (Optional) Whether to echo the prompt - */ - echo?: boolean; - - /** - * (Optional) The penalty for repeated tokens - */ - frequency_penalty?: number; - - guided_choice?: Array; - - /** - * (Optional) The logit bias to use - */ - logit_bias?: { [key: string]: number }; - - /** - * (Optional) The log probabilities to use - */ - logprobs?: boolean; - - /** - * (Optional) The maximum number of tokens to generate - */ - max_tokens?: number; - - /** - * (Optional) The number of completions to generate - */ - n?: number; - - /** - * (Optional) The penalty for repeated tokens - */ - presence_penalty?: number; - - prompt_logprobs?: number; - - /** - * (Optional) The seed to use - */ - seed?: number; - - /** - * (Optional) The stop tokens to use - */ - stop?: string | Array; - - /** - * (Optional) Whether to stream the response - */ - stream?: boolean; - - /** - * (Optional) The stream options to use - */ - stream_options?: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * (Optional) The temperature to use - */ - temperature?: number; - - /** - * (Optional) The top p to use - */ - top_p?: number; - - /** - * (Optional) The user to use - */ - user?: string; -} - -V1.Responses = Responses; -V1.Chat = Chat; - -export declare namespace V1 { - export { - type ChoiceLogprobs as ChoiceLogprobs, - type TokenLogProb as TokenLogProb, - type V1GenerateCompletionResponse as V1GenerateCompletionResponse, - type V1ListModelsResponse as V1ListModelsResponse, - type V1GenerateCompletionParams as V1GenerateCompletionParams, - }; - - export { - Responses as Responses, - type OpenAIResponse as OpenAIResponse, - type ResponseCreateParams as ResponseCreateParams, - }; - - export { - Chat as Chat, - type ChatCompletionContentPart as 
ChatCompletionContentPart, - type ChatCompletionToolCall as ChatCompletionToolCall, - type MessageParam as MessageParam, - type ChatGenerateCompletionResponse as ChatGenerateCompletionResponse, - type ChatGenerateCompletionParams as ChatGenerateCompletionParams, - }; -} diff --git a/src/resources/post-training/index.ts b/src/resources/post-training/index.ts index 27579f9..6fc7e36 100644 --- a/src/resources/post-training/index.ts +++ b/src/resources/post-training/index.ts @@ -2,17 +2,18 @@ export { Job, - type JobRetrieveArtifactsResponse, - type JobRetrieveStatusResponse, + type JobListResponse, + type JobArtifactsResponse, + type JobStatusResponse, + type JobArtifactsParams, type JobCancelParams, - type JobRetrieveArtifactsParams, - type JobRetrieveStatusParams, + type JobStatusParams, } from './job'; export { PostTraining, + type AlgorithmConfig, + type ListPostTrainingJobsResponse, type PostTrainingJob, - type TrainingConfig, - type PostTrainingListJobsResponse, - type PostTrainingFineTuneSupervisedParams, - type PostTrainingOptimizePreferencesParams, + type PostTrainingPreferenceOptimizeParams, + type PostTrainingSupervisedFineTuneParams, } from './post-training'; diff --git a/src/resources/post-training/job.ts b/src/resources/post-training/job.ts index b231c8c..cb2c48e 100644 --- a/src/resources/post-training/job.ts +++ b/src/resources/post-training/job.ts @@ -1,38 +1,61 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../core/resource'; -import { APIPromise } from '../../core/api-promise'; -import { buildHeaders } from '../../internal/headers'; -import { RequestOptions } from '../../internal/request-options'; +import { APIResource } from '../../resource'; +import * as Core from '../../core'; +import * as PostTrainingAPI from './post-training'; export class Job extends APIResource { - cancel(body: JobCancelParams, options?: RequestOptions): APIPromise { + /** + * Get all training jobs. 
+ */ + list( + options?: Core.RequestOptions, + ): Core.APIPromise> { + return ( + this._client.get('/v1/post-training/jobs', options) as Core.APIPromise<{ + data: Array; + }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Get the artifacts of a training job. + */ + artifacts(query: JobArtifactsParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get('/v1/post-training/job/artifacts', { query, ...options }); + } + + /** + * Cancel a training job. + */ + cancel(body: JobCancelParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/post-training/job/cancel', { body, ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), + headers: { Accept: '*/*', ...options?.headers }, }); } - retrieveArtifacts( - query: JobRetrieveArtifactsParams, - options?: RequestOptions, - ): APIPromise { - return this._client.get('/v1/post-training/job/artifacts', { query, ...options }); + /** + * Get the status of a training job. + */ + status(query: JobStatusParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get('/v1/post-training/job/status', { query, ...options }); } +} - retrieveStatus( - query: JobRetrieveStatusParams, - options?: RequestOptions, - ): APIPromise { - return this._client.get('/v1/post-training/job/status', { query, ...options }); +export type JobListResponse = Array; + +export namespace JobListResponse { + export interface JobListResponseItem { + job_uuid: string; } } /** * Artifacts of a finetuning job. */ -export interface JobRetrieveArtifactsResponse { +export interface JobArtifactsResponse { checkpoints: Array; job_uuid: string; @@ -41,7 +64,7 @@ export interface JobRetrieveArtifactsResponse { /** * Status of a finetuning job. 
*/ -export interface JobRetrieveStatusResponse { +export interface JobStatusResponse { checkpoints: Array; job_uuid: string; @@ -57,24 +80,34 @@ export interface JobRetrieveStatusResponse { started_at?: string; } -export interface JobCancelParams { +export interface JobArtifactsParams { + /** + * The UUID of the job to get the artifacts of. + */ job_uuid: string; } -export interface JobRetrieveArtifactsParams { +export interface JobCancelParams { + /** + * The UUID of the job to cancel. + */ job_uuid: string; } -export interface JobRetrieveStatusParams { +export interface JobStatusParams { + /** + * The UUID of the job to get the status of. + */ job_uuid: string; } export declare namespace Job { export { - type JobRetrieveArtifactsResponse as JobRetrieveArtifactsResponse, - type JobRetrieveStatusResponse as JobRetrieveStatusResponse, + type JobListResponse as JobListResponse, + type JobArtifactsResponse as JobArtifactsResponse, + type JobStatusResponse as JobStatusResponse, + type JobArtifactsParams as JobArtifactsParams, type JobCancelParams as JobCancelParams, - type JobRetrieveArtifactsParams as JobRetrieveArtifactsParams, - type JobRetrieveStatusParams as JobRetrieveStatusParams, + type JobStatusParams as JobStatusParams, }; } diff --git a/src/resources/post-training/post-training.ts b/src/resources/post-training/post-training.ts index a51b2b5..11d14c0 100644 --- a/src/resources/post-training/post-training.ts +++ b/src/resources/post-training/post-training.ts @@ -1,37 +1,79 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../core/resource'; +import { APIResource } from '../../resource'; +import * as Core from '../../core'; import * as JobAPI from './job'; import { Job, + JobArtifactsParams, + JobArtifactsResponse, JobCancelParams, - JobRetrieveArtifactsParams, - JobRetrieveArtifactsResponse, - JobRetrieveStatusParams, - JobRetrieveStatusResponse, + JobListResponse, + JobStatusParams, + JobStatusResponse, } from './job'; -import { APIPromise } from '../../core/api-promise'; -import { RequestOptions } from '../../internal/request-options'; export class PostTraining extends APIResource { job: JobAPI.Job = new JobAPI.Job(this._client); - fineTuneSupervised( - body: PostTrainingFineTuneSupervisedParams, - options?: RequestOptions, - ): APIPromise { + /** + * Run preference optimization of a model. + */ + preferenceOptimize( + body: PostTrainingPreferenceOptimizeParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post('/v1/post-training/preference-optimize', { body, ...options }); + } + + /** + * Run supervised fine-tuning of a model. 
+ */ + supervisedFineTune( + body: PostTrainingSupervisedFineTuneParams, + options?: Core.RequestOptions, + ): Core.APIPromise { return this._client.post('/v1/post-training/supervised-fine-tune', { body, ...options }); } +} + +export type AlgorithmConfig = AlgorithmConfig.LoraFinetuningConfig | AlgorithmConfig.QatFinetuningConfig; + +export namespace AlgorithmConfig { + export interface LoraFinetuningConfig { + alpha: number; + + apply_lora_to_mlp: boolean; + + apply_lora_to_output: boolean; + + lora_attn_modules: Array; - listJobs(options?: RequestOptions): APIPromise { - return this._client.get('/v1/post-training/jobs', options); + rank: number; + + type: 'LoRA'; + + quantize_base?: boolean; + + use_dora?: boolean; } - optimizePreferences( - body: PostTrainingOptimizePreferencesParams, - options?: RequestOptions, - ): APIPromise { - return this._client.post('/v1/post-training/preference-optimize', { body, ...options }); + export interface QatFinetuningConfig { + group_size: number; + + quantizer_name: string; + + type: 'QAT'; + } +} + +export interface ListPostTrainingJobsResponse { + data: Array; +} + +export namespace ListPostTrainingJobsResponse { + export interface Data { + job_uuid: string; } } @@ -39,141 +81,207 @@ export interface PostTrainingJob { job_uuid: string; } -export interface TrainingConfig { - gradient_accumulation_steps: number; +export interface PostTrainingPreferenceOptimizeParams { + /** + * The algorithm configuration. + */ + algorithm_config: PostTrainingPreferenceOptimizeParams.AlgorithmConfig; - max_steps_per_epoch: number; + /** + * The model to fine-tune. + */ + finetuned_model: string; - n_epochs: number; + /** + * The hyperparam search configuration. + */ + hyperparam_search_config: { [key: string]: boolean | number | string | Array | unknown | null }; - data_config?: TrainingConfig.DataConfig; + /** + * The UUID of the job to create. + */ + job_uuid: string; - dtype?: string; + /** + * The logger configuration. 
+ */ + logger_config: { [key: string]: boolean | number | string | Array | unknown | null }; - efficiency_config?: TrainingConfig.EfficiencyConfig; + /** + * The training configuration. + */ + training_config: PostTrainingPreferenceOptimizeParams.TrainingConfig; +} - max_validation_steps?: number; +export namespace PostTrainingPreferenceOptimizeParams { + /** + * The algorithm configuration. + */ + export interface AlgorithmConfig { + epsilon: number; - optimizer_config?: TrainingConfig.OptimizerConfig; -} + gamma: number; -export namespace TrainingConfig { - export interface DataConfig { - batch_size: number; + reward_clip: number; + + reward_scale: number; + } + + /** + * The training configuration. + */ + export interface TrainingConfig { + gradient_accumulation_steps: number; + + max_steps_per_epoch: number; - data_format: 'instruct' | 'dialog'; + n_epochs: number; - dataset_id: string; + data_config?: TrainingConfig.DataConfig; - shuffle: boolean; + dtype?: string; - packed?: boolean; + efficiency_config?: TrainingConfig.EfficiencyConfig; - train_on_input?: boolean; + max_validation_steps?: number; - validation_dataset_id?: string; + optimizer_config?: TrainingConfig.OptimizerConfig; } - export interface EfficiencyConfig { - enable_activation_checkpointing?: boolean; + export namespace TrainingConfig { + export interface DataConfig { + batch_size: number; - enable_activation_offloading?: boolean; + data_format: 'instruct' | 'dialog'; - fsdp_cpu_offload?: boolean; + dataset_id: string; - memory_efficient_fsdp_wrap?: boolean; - } + shuffle: boolean; - export interface OptimizerConfig { - lr: number; + packed?: boolean; - num_warmup_steps: number; + train_on_input?: boolean; - optimizer_type: 'adam' | 'adamw' | 'sgd'; + validation_dataset_id?: string; + } - weight_decay: number; - } -} + export interface EfficiencyConfig { + enable_activation_checkpointing?: boolean; -export interface PostTrainingListJobsResponse { - data: Array; -} + 
enable_activation_offloading?: boolean; -export namespace PostTrainingListJobsResponse { - export interface Data { - job_uuid: string; + fsdp_cpu_offload?: boolean; + + memory_efficient_fsdp_wrap?: boolean; + } + + export interface OptimizerConfig { + lr: number; + + num_warmup_steps: number; + + optimizer_type: 'adam' | 'adamw' | 'sgd'; + + weight_decay: number; + } } } -export interface PostTrainingFineTuneSupervisedParams { +export interface PostTrainingSupervisedFineTuneParams { + /** + * The hyperparam search configuration. + */ hyperparam_search_config: { [key: string]: boolean | number | string | Array | unknown | null }; + /** + * The UUID of the job to create. + */ job_uuid: string; + /** + * The logger configuration. + */ logger_config: { [key: string]: boolean | number | string | Array | unknown | null }; - training_config: TrainingConfig; + /** + * The training configuration. + */ + training_config: PostTrainingSupervisedFineTuneParams.TrainingConfig; - algorithm_config?: - | PostTrainingFineTuneSupervisedParams.LoraFinetuningConfig - | PostTrainingFineTuneSupervisedParams.QatFinetuningConfig; + /** + * The algorithm configuration. + */ + algorithm_config?: AlgorithmConfig; + /** + * The directory to save checkpoint(s) to. + */ checkpoint_dir?: string; + /** + * The model to fine-tune. + */ model?: string; } -export namespace PostTrainingFineTuneSupervisedParams { - export interface LoraFinetuningConfig { - alpha: number; +export namespace PostTrainingSupervisedFineTuneParams { + /** + * The training configuration. 
+ */ + export interface TrainingConfig { + gradient_accumulation_steps: number; - apply_lora_to_mlp: boolean; + max_steps_per_epoch: number; - apply_lora_to_output: boolean; + n_epochs: number; - lora_attn_modules: Array; + data_config?: TrainingConfig.DataConfig; - rank: number; + dtype?: string; - type: 'LoRA'; + efficiency_config?: TrainingConfig.EfficiencyConfig; - quantize_base?: boolean; + max_validation_steps?: number; - use_dora?: boolean; + optimizer_config?: TrainingConfig.OptimizerConfig; } - export interface QatFinetuningConfig { - group_size: number; + export namespace TrainingConfig { + export interface DataConfig { + batch_size: number; - quantizer_name: string; + data_format: 'instruct' | 'dialog'; - type: 'QAT'; - } -} + dataset_id: string; -export interface PostTrainingOptimizePreferencesParams { - algorithm_config: PostTrainingOptimizePreferencesParams.AlgorithmConfig; + shuffle: boolean; - finetuned_model: string; + packed?: boolean; - hyperparam_search_config: { [key: string]: boolean | number | string | Array | unknown | null }; + train_on_input?: boolean; - job_uuid: string; + validation_dataset_id?: string; + } - logger_config: { [key: string]: boolean | number | string | Array | unknown | null }; + export interface EfficiencyConfig { + enable_activation_checkpointing?: boolean; - training_config: TrainingConfig; -} + enable_activation_offloading?: boolean; -export namespace PostTrainingOptimizePreferencesParams { - export interface AlgorithmConfig { - epsilon: number; + fsdp_cpu_offload?: boolean; - gamma: number; + memory_efficient_fsdp_wrap?: boolean; + } - reward_clip: number; + export interface OptimizerConfig { + lr: number; - reward_scale: number; + num_warmup_steps: number; + + optimizer_type: 'adam' | 'adamw' | 'sgd'; + + weight_decay: number; + } } } @@ -181,19 +289,20 @@ PostTraining.Job = Job; export declare namespace PostTraining { export { + type AlgorithmConfig as AlgorithmConfig, + type ListPostTrainingJobsResponse as 
ListPostTrainingJobsResponse, type PostTrainingJob as PostTrainingJob, - type TrainingConfig as TrainingConfig, - type PostTrainingListJobsResponse as PostTrainingListJobsResponse, - type PostTrainingFineTuneSupervisedParams as PostTrainingFineTuneSupervisedParams, - type PostTrainingOptimizePreferencesParams as PostTrainingOptimizePreferencesParams, + type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams, + type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams, }; export { Job as Job, - type JobRetrieveArtifactsResponse as JobRetrieveArtifactsResponse, - type JobRetrieveStatusResponse as JobRetrieveStatusResponse, + type JobListResponse as JobListResponse, + type JobArtifactsResponse as JobArtifactsResponse, + type JobStatusResponse as JobStatusResponse, + type JobArtifactsParams as JobArtifactsParams, type JobCancelParams as JobCancelParams, - type JobRetrieveArtifactsParams as JobRetrieveArtifactsParams, - type JobRetrieveStatusParams as JobRetrieveStatusParams, + type JobStatusParams as JobStatusParams, }; } diff --git a/src/resources/providers.ts b/src/resources/providers.ts index e537a81..dd37a42 100644 --- a/src/resources/providers.ts +++ b/src/resources/providers.ts @@ -1,36 +1,36 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../core/resource'; -import { APIPromise } from '../core/api-promise'; -import { RequestOptions } from '../internal/request-options'; -import { path } from '../internal/utils/path'; +import { APIResource } from '../resource'; +import * as Core from '../core'; +import * as InspectAPI from './inspect'; export class Providers extends APIResource { - retrieve(providerID: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/providers/${providerID}`, options); + /** + * Get detailed information about a specific provider. 
+ */ + retrieve(providerId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/providers/${providerId}`, options); } - list(options?: RequestOptions): APIPromise { - return this._client.get('/v1/providers', options); + /** + * List all available providers. + */ + list(options?: Core.RequestOptions): Core.APIPromise { + return ( + this._client.get('/v1/providers', options) as Core.APIPromise<{ data: ProviderListResponse }> + )._thenUnwrap((obj) => obj.data); } } -export interface ProviderInfo { - api: string; - - config: { [key: string]: boolean | number | string | Array | unknown | null }; - - health: { [key: string]: boolean | number | string | Array | unknown | null }; - - provider_id: string; - - provider_type: string; +export interface ListProvidersResponse { + data: ProviderListResponse; } -export interface ProviderListResponse { - data: Array; -} +export type ProviderListResponse = Array; export declare namespace Providers { - export { type ProviderInfo as ProviderInfo, type ProviderListResponse as ProviderListResponse }; + export { + type ListProvidersResponse as ListProvidersResponse, + type ProviderListResponse as ProviderListResponse, + }; } diff --git a/src/resources/openai.ts b/src/resources/responses.ts similarity index 71% rename from src/resources/openai.ts rename to src/resources/responses.ts index 703e767..9d26aac 100644 --- a/src/resources/openai.ts +++ b/src/resources/responses.ts @@ -1,3 +1,3 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export * from './openai/index'; +export * from './responses/index'; diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts new file mode 100644 index 0000000..6a19891 --- /dev/null +++ b/src/resources/responses/index.ts @@ -0,0 +1,13 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { InputItems, type InputItemListResponse, type InputItemListParams } from './input-items'; +export { + Responses, + type ResponseObject, + type ResponseObjectStream, + type ResponseListResponse, + type ResponseCreateParams, + type ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming, + type ResponseListParams, +} from './responses'; diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts new file mode 100644 index 0000000..9f1303d --- /dev/null +++ b/src/resources/responses/input-items.ts @@ -0,0 +1,224 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; + +export class InputItems extends APIResource { + /** + * List input items for a given OpenAI response. + */ + list( + responseId: string, + query?: InputItemListParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + list(responseId: string, options?: Core.RequestOptions): Core.APIPromise; + list( + responseId: string, + query: InputItemListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.list(responseId, {}, query); + } + return this._client.get(`/v1/openai/v1/responses/${responseId}/input_items`, { query, ...options }); + } +} + +export interface InputItemListResponse { + data: Array< + | InputItemListResponse.OpenAIResponseOutputMessageWebSearchToolCall + | InputItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall + | InputItemListResponse.OpenAIResponseOutputMessageFunctionToolCall + | InputItemListResponse.OpenAIResponseInputFunctionToolCallOutput + | InputItemListResponse.OpenAIResponseMessage + >; + + object: 'list'; +} + +export namespace InputItemListResponse { + export interface OpenAIResponseOutputMessageWebSearchToolCall { + id: string; + + status: string; + + 
type: 'web_search_call'; + } + + export interface OpenAIResponseOutputMessageFileSearchToolCall { + id: string; + + queries: Array; + + status: string; + + type: 'file_search_call'; + + results?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + } + + export interface OpenAIResponseOutputMessageFunctionToolCall { + arguments: string; + + call_id: string; + + name: string; + + type: 'function_call'; + + id?: string; + + status?: string; + } + + /** + * This represents the output of a function call that gets passed back to the + * model. + */ + export interface OpenAIResponseInputFunctionToolCallOutput { + call_id: string; + + output: string; + + type: 'function_call_output'; + + id?: string; + + status?: string; + } + + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + export interface OpenAIResponseInputMessageContentText { + text: string; + + type: 'input_text'; + } + + export interface OpenAIResponseInputMessageContentImage { + detail: 'low' | 'high' | 'auto'; + + type: 'input_image'; + + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + 
export interface OpenAIResponseAnnotationFileCitation { + file_id: string; + + filename: string; + + index: number; + + type: 'file_citation'; + } + + export interface OpenAIResponseAnnotationCitation { + end_index: number; + + start_index: number; + + title: string; + + type: 'url_citation'; + + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } +} + +export interface InputItemListParams { + /** + * An item ID to list items after, used for pagination. + */ + after?: string; + + /** + * An item ID to list items before, used for pagination. + */ + before?: string; + + /** + * Additional fields to include in the response. + */ + include?: Array; + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and + * 100, and the default is 20. + */ + limit?: number; + + /** + * The order to return the input items in. Default is desc. + */ + order?: 'asc' | 'desc'; +} + +export declare namespace InputItems { + export { + type InputItemListResponse as InputItemListResponse, + type InputItemListParams as InputItemListParams, + }; +} diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts new file mode 100644 index 0000000..6dc465b --- /dev/null +++ b/src/resources/responses/responses.ts @@ -0,0 +1,1660 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import { APIPromise } from '../../core'; +import * as Core from '../../core'; +import * as ResponsesAPI from './responses'; +import * as InputItemsAPI from './input-items'; +import { InputItemListParams, InputItemListResponse, InputItems } from './input-items'; +import { Stream } from '../../streaming'; + +export class Responses extends APIResource { + inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client); + + /** + * Create a new OpenAI response. + */ + create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; + create( + body: ResponseCreateParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + create( + body: ResponseCreateParamsBase, + options?: Core.RequestOptions, + ): APIPromise | ResponseObject>; + create( + body: ResponseCreateParams, + options?: Core.RequestOptions, + ): APIPromise | APIPromise> { + return this._client.post('/v1/openai/v1/responses', { + body, + ...options, + stream: body.stream ?? false, + }) as APIPromise | APIPromise>; + } + + /** + * Retrieve an OpenAI response by its ID. + */ + retrieve(responseId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/openai/v1/responses/${responseId}`, options); + } + + /** + * List all OpenAI responses. 
+ */ + list(query?: ResponseListParams, options?: Core.RequestOptions): Core.APIPromise; + list(options?: Core.RequestOptions): Core.APIPromise; + list( + query: ResponseListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return this._client.get('/v1/openai/v1/responses', { query, ...options }); + } +} + +export interface ResponseObject { + id: string; + + created_at: number; + + model: string; + + object: 'response'; + + output: Array< + | ResponseObject.OpenAIResponseMessage + | ResponseObject.OpenAIResponseOutputMessageWebSearchToolCall + | ResponseObject.OpenAIResponseOutputMessageFileSearchToolCall + | ResponseObject.OpenAIResponseOutputMessageFunctionToolCall + | ResponseObject.OpenAIResponseOutputMessageMcpCall + | ResponseObject.OpenAIResponseOutputMessageMcpListTools + >; + + parallel_tool_calls: boolean; + + status: string; + + text: ResponseObject.Text; + + error?: ResponseObject.Error; + + previous_response_id?: string; + + temperature?: number; + + top_p?: number; + + truncation?: string; + + user?: string; +} + +export namespace ResponseObject { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + export interface OpenAIResponseInputMessageContentText { + text: string; + + type: 'input_text'; + } + + export interface OpenAIResponseInputMessageContentImage { + detail: 'low' | 'high' | 'auto'; + + type: 'input_image'; + + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + export interface OpenAIResponseAnnotationFileCitation { + file_id: string; + + filename: string; + + index: number; + + type: 'file_citation'; + } + + export interface OpenAIResponseAnnotationCitation { + end_index: number; + + start_index: number; + + title: string; + + type: 'url_citation'; + + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } + + export interface OpenAIResponseOutputMessageWebSearchToolCall { + id: string; + + status: string; + + type: 'web_search_call'; + } + + export interface OpenAIResponseOutputMessageFileSearchToolCall { + id: string; + + queries: Array; + + status: string; + + type: 'file_search_call'; + + results?: Array<{ [key: 
string]: boolean | number | string | Array | unknown | null }>; + } + + export interface OpenAIResponseOutputMessageFunctionToolCall { + arguments: string; + + call_id: string; + + name: string; + + type: 'function_call'; + + id?: string; + + status?: string; + } + + export interface OpenAIResponseOutputMessageMcpCall { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_call'; + + error?: string; + + output?: string; + } + + export interface OpenAIResponseOutputMessageMcpListTools { + id: string; + + server_label: string; + + tools: Array; + + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + export interface Tool { + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + name: string; + + description?: string; + } + } + + export interface Text { + /** + * Configuration for Responses API text format. + */ + format?: Text.Format; + } + + export namespace Text { + /** + * Configuration for Responses API text format. + */ + export interface Format { + /** + * Must be "text", "json_schema", or "json_object" to identify the format type + */ + type: 'text' | 'json_schema' | 'json_object'; + + /** + * (Optional) A description of the response format. Only used for json_schema. + */ + description?: string; + + /** + * The name of the response format. Only used for json_schema. + */ + name?: string; + + /** + * The JSON schema the response should conform to. In a Python SDK, this is often a + * `pydantic` model. Only used for json_schema. + */ + schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) Whether to strictly enforce the JSON schema. If true, the response + * must match the schema exactly. Only used for json_schema. 
+ */ + strict?: boolean; + } + } + + export interface Error { + code: string; + + message: string; + } +} + +export type ResponseObjectStream = + | ResponseObjectStream.OpenAIResponseObjectStreamResponseCreated + | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemAdded + | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemDone + | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDelta + | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDone + | ResponseObjectStream.OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta + | ResponseObjectStream.OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone + | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallInProgress + | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallSearching + | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallCompleted + | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsInProgress + | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsFailed + | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsCompleted + | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta + | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallArgumentsDone + | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallInProgress + | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallFailed + | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallCompleted + | ResponseObjectStream.OpenAIResponseObjectStreamResponseCompleted; + +export namespace ResponseObjectStream { + export interface OpenAIResponseObjectStreamResponseCreated { + response: ResponsesAPI.ResponseObject; + + type: 'response.created'; + } + + export interface OpenAIResponseObjectStreamResponseOutputItemAdded { + /** + * Corresponds to the various Message types in the Responses API. 
They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + item: + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseMessage + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageWebSearchToolCall + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFileSearchToolCall + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFunctionToolCall + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpCall + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools; + + output_index: number; + + response_id: string; + + sequence_number: number; + + type: 'response.output_item.added'; + } + + export namespace OpenAIResponseObjectStreamResponseOutputItemAdded { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + export interface OpenAIResponseInputMessageContentText { + text: string; + + type: 'input_text'; + } + + export interface OpenAIResponseInputMessageContentImage { + detail: 'low' | 'high' | 'auto'; + + type: 'input_image'; + + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + export interface OpenAIResponseAnnotationFileCitation { + file_id: string; + + filename: string; + + index: number; + + type: 'file_citation'; + } + + export interface OpenAIResponseAnnotationCitation { + end_index: number; + + start_index: number; + + title: string; + + type: 'url_citation'; + + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } + + export interface OpenAIResponseOutputMessageWebSearchToolCall { + id: string; + + status: string; + + type: 'web_search_call'; + } + + export interface OpenAIResponseOutputMessageFileSearchToolCall { + id: string; + + queries: Array; + + status: string; + + type: 'file_search_call'; + + results?: Array<{ [key: 
string]: boolean | number | string | Array | unknown | null }>; + } + + export interface OpenAIResponseOutputMessageFunctionToolCall { + arguments: string; + + call_id: string; + + name: string; + + type: 'function_call'; + + id?: string; + + status?: string; + } + + export interface OpenAIResponseOutputMessageMcpCall { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_call'; + + error?: string; + + output?: string; + } + + export interface OpenAIResponseOutputMessageMcpListTools { + id: string; + + server_label: string; + + tools: Array; + + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + export interface Tool { + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + name: string; + + description?: string; + } + } + } + + export interface OpenAIResponseObjectStreamResponseOutputItemDone { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + item: + | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseMessage + | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageWebSearchToolCall + | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFileSearchToolCall + | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFunctionToolCall + | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpCall + | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools; + + output_index: number; + + response_id: string; + + sequence_number: number; + + type: 'response.output_item.done'; + } + + export namespace OpenAIResponseObjectStreamResponseOutputItemDone { + /** + * Corresponds to the various Message types in the Responses API. 
They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + export interface OpenAIResponseInputMessageContentText { + text: string; + + type: 'input_text'; + } + + export interface OpenAIResponseInputMessageContentImage { + detail: 'low' | 'high' | 'auto'; + + type: 'input_image'; + + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + export interface OpenAIResponseAnnotationFileCitation { + file_id: string; + + filename: string; + + index: number; + + type: 'file_citation'; + } + + export interface OpenAIResponseAnnotationCitation { + end_index: number; + + start_index: number; + + title: string; + + type: 'url_citation'; + + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } + + export interface OpenAIResponseOutputMessageWebSearchToolCall { + id: string; + + status: string; + + type: 'web_search_call'; + } + + export interface 
OpenAIResponseOutputMessageFileSearchToolCall { + id: string; + + queries: Array; + + status: string; + + type: 'file_search_call'; + + results?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + } + + export interface OpenAIResponseOutputMessageFunctionToolCall { + arguments: string; + + call_id: string; + + name: string; + + type: 'function_call'; + + id?: string; + + status?: string; + } + + export interface OpenAIResponseOutputMessageMcpCall { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_call'; + + error?: string; + + output?: string; + } + + export interface OpenAIResponseOutputMessageMcpListTools { + id: string; + + server_label: string; + + tools: Array; + + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + export interface Tool { + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + name: string; + + description?: string; + } + } + } + + export interface OpenAIResponseObjectStreamResponseOutputTextDelta { + content_index: number; + + delta: string; + + item_id: string; + + output_index: number; + + sequence_number: number; + + type: 'response.output_text.delta'; + } + + export interface OpenAIResponseObjectStreamResponseOutputTextDone { + content_index: number; + + item_id: string; + + output_index: number; + + sequence_number: number; + + text: string; + + type: 'response.output_text.done'; + } + + export interface OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta { + delta: string; + + item_id: string; + + output_index: number; + + sequence_number: number; + + type: 'response.function_call_arguments.delta'; + } + + export interface OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone { + arguments: string; + + item_id: string; + + output_index: number; + + sequence_number: number; + + type: 'response.function_call_arguments.done'; + } + + export interface 
OpenAIResponseObjectStreamResponseWebSearchCallInProgress { + item_id: string; + + output_index: number; + + sequence_number: number; + + type: 'response.web_search_call.in_progress'; + } + + export interface OpenAIResponseObjectStreamResponseWebSearchCallSearching { + item_id: string; + + output_index: number; + + sequence_number: number; + + type: 'response.web_search_call.searching'; + } + + export interface OpenAIResponseObjectStreamResponseWebSearchCallCompleted { + item_id: string; + + output_index: number; + + sequence_number: number; + + type: 'response.web_search_call.completed'; + } + + export interface OpenAIResponseObjectStreamResponseMcpListToolsInProgress { + sequence_number: number; + + type: 'response.mcp_list_tools.in_progress'; + } + + export interface OpenAIResponseObjectStreamResponseMcpListToolsFailed { + sequence_number: number; + + type: 'response.mcp_list_tools.failed'; + } + + export interface OpenAIResponseObjectStreamResponseMcpListToolsCompleted { + sequence_number: number; + + type: 'response.mcp_list_tools.completed'; + } + + export interface OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta { + delta: string; + + item_id: string; + + output_index: number; + + sequence_number: number; + + type: 'response.mcp_call.arguments.delta'; + } + + export interface OpenAIResponseObjectStreamResponseMcpCallArgumentsDone { + arguments: string; + + item_id: string; + + output_index: number; + + sequence_number: number; + + type: 'response.mcp_call.arguments.done'; + } + + export interface OpenAIResponseObjectStreamResponseMcpCallInProgress { + item_id: string; + + output_index: number; + + sequence_number: number; + + type: 'response.mcp_call.in_progress'; + } + + export interface OpenAIResponseObjectStreamResponseMcpCallFailed { + sequence_number: number; + + type: 'response.mcp_call.failed'; + } + + export interface OpenAIResponseObjectStreamResponseMcpCallCompleted { + sequence_number: number; + + type: 'response.mcp_call.completed'; + } + 
+ export interface OpenAIResponseObjectStreamResponseCompleted { + response: ResponsesAPI.ResponseObject; + + type: 'response.completed'; + } +} + +export interface ResponseListResponse { + data: Array; + + first_id: string; + + has_more: boolean; + + last_id: string; + + object: 'list'; +} + +export namespace ResponseListResponse { + export interface Data { + id: string; + + created_at: number; + + input: Array< + | Data.OpenAIResponseOutputMessageWebSearchToolCall + | Data.OpenAIResponseOutputMessageFileSearchToolCall + | Data.OpenAIResponseOutputMessageFunctionToolCall + | Data.OpenAIResponseInputFunctionToolCallOutput + | Data.OpenAIResponseMessage + >; + + model: string; + + object: 'response'; + + output: Array< + | Data.OpenAIResponseMessage + | Data.OpenAIResponseOutputMessageWebSearchToolCall + | Data.OpenAIResponseOutputMessageFileSearchToolCall + | Data.OpenAIResponseOutputMessageFunctionToolCall + | Data.OpenAIResponseOutputMessageMcpCall + | Data.OpenAIResponseOutputMessageMcpListTools + >; + + parallel_tool_calls: boolean; + + status: string; + + text: Data.Text; + + error?: Data.Error; + + previous_response_id?: string; + + temperature?: number; + + top_p?: number; + + truncation?: string; + + user?: string; + } + + export namespace Data { + export interface OpenAIResponseOutputMessageWebSearchToolCall { + id: string; + + status: string; + + type: 'web_search_call'; + } + + export interface OpenAIResponseOutputMessageFileSearchToolCall { + id: string; + + queries: Array; + + status: string; + + type: 'file_search_call'; + + results?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + } + + export interface OpenAIResponseOutputMessageFunctionToolCall { + arguments: string; + + call_id: string; + + name: string; + + type: 'function_call'; + + id?: string; + + status?: string; + } + + /** + * This represents the output of a function call that gets passed back to the + * model. 
+ */ + export interface OpenAIResponseInputFunctionToolCallOutput { + call_id: string; + + output: string; + + type: 'function_call_output'; + + id?: string; + + status?: string; + } + + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + export interface OpenAIResponseInputMessageContentText { + text: string; + + type: 'input_text'; + } + + export interface OpenAIResponseInputMessageContentImage { + detail: 'low' | 'high' | 'auto'; + + type: 'input_image'; + + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + export interface OpenAIResponseAnnotationFileCitation { + file_id: string; + + filename: string; + + index: number; + + type: 'file_citation'; + } + + export interface OpenAIResponseAnnotationCitation { + end_index: number; + + start_index: number; + + title: string; + + type: 'url_citation'; + + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface 
OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } + + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + export interface OpenAIResponseInputMessageContentText { + text: string; + + type: 'input_text'; + } + + export interface OpenAIResponseInputMessageContentImage { + detail: 'low' | 'high' | 'auto'; + + type: 'input_image'; + + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + export interface OpenAIResponseAnnotationFileCitation { + file_id: string; + + filename: string; + + index: number; + + type: 'file_citation'; + } + + export interface OpenAIResponseAnnotationCitation { + end_index: number; + + start_index: number; + + title: string; + + type: 'url_citation'; + + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 
'file_path'; + } + } + } + + export interface OpenAIResponseOutputMessageWebSearchToolCall { + id: string; + + status: string; + + type: 'web_search_call'; + } + + export interface OpenAIResponseOutputMessageFileSearchToolCall { + id: string; + + queries: Array; + + status: string; + + type: 'file_search_call'; + + results?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + } + + export interface OpenAIResponseOutputMessageFunctionToolCall { + arguments: string; + + call_id: string; + + name: string; + + type: 'function_call'; + + id?: string; + + status?: string; + } + + export interface OpenAIResponseOutputMessageMcpCall { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_call'; + + error?: string; + + output?: string; + } + + export interface OpenAIResponseOutputMessageMcpListTools { + id: string; + + server_label: string; + + tools: Array; + + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + export interface Tool { + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + name: string; + + description?: string; + } + } + + export interface Text { + /** + * Configuration for Responses API text format. + */ + format?: Text.Format; + } + + export namespace Text { + /** + * Configuration for Responses API text format. + */ + export interface Format { + /** + * Must be "text", "json_schema", or "json_object" to identify the format type + */ + type: 'text' | 'json_schema' | 'json_object'; + + /** + * (Optional) A description of the response format. Only used for json_schema. + */ + description?: string; + + /** + * The name of the response format. Only used for json_schema. + */ + name?: string; + + /** + * The JSON schema the response should conform to. In a Python SDK, this is often a + * `pydantic` model. Only used for json_schema. 
+ */ + schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) Whether to strictly enforce the JSON schema. If true, the response + * must match the schema exactly. Only used for json_schema. + */ + strict?: boolean; + } + } + + export interface Error { + code: string; + + message: string; + } + } +} + +export type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsStreaming; + +export interface ResponseCreateParamsBase { + /** + * Input message(s) to create the response. + */ + input: + | string + | Array< + | ResponseCreateParams.OpenAIResponseOutputMessageWebSearchToolCall + | ResponseCreateParams.OpenAIResponseOutputMessageFileSearchToolCall + | ResponseCreateParams.OpenAIResponseOutputMessageFunctionToolCall + | ResponseCreateParams.OpenAIResponseInputFunctionToolCallOutput + | ResponseCreateParams.OpenAIResponseMessage + >; + + /** + * The underlying LLM used for completions. + */ + model: string; + + instructions?: string; + + max_infer_iters?: number; + + /** + * (Optional) if specified, the new response will be a continuation of the previous + * response. This can be used to easily fork-off new responses from existing + * responses. 
+ */ + previous_response_id?: string; + + store?: boolean; + + stream?: boolean; + + temperature?: number; + + text?: ResponseCreateParams.Text; + + tools?: Array< + | ResponseCreateParams.OpenAIResponseInputToolWebSearch + | ResponseCreateParams.OpenAIResponseInputToolFileSearch + | ResponseCreateParams.OpenAIResponseInputToolFunction + | ResponseCreateParams.OpenAIResponseInputToolMcp + >; +} + +export namespace ResponseCreateParams { + export interface OpenAIResponseOutputMessageWebSearchToolCall { + id: string; + + status: string; + + type: 'web_search_call'; + } + + export interface OpenAIResponseOutputMessageFileSearchToolCall { + id: string; + + queries: Array; + + status: string; + + type: 'file_search_call'; + + results?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + } + + export interface OpenAIResponseOutputMessageFunctionToolCall { + arguments: string; + + call_id: string; + + name: string; + + type: 'function_call'; + + id?: string; + + status?: string; + } + + /** + * This represents the output of a function call that gets passed back to the + * model. + */ + export interface OpenAIResponseInputFunctionToolCallOutput { + call_id: string; + + output: string; + + type: 'function_call_output'; + + id?: string; + + status?: string; + } + + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + export interface OpenAIResponseInputMessageContentText { + text: string; + + type: 'input_text'; + } + + export interface OpenAIResponseInputMessageContentImage { + detail: 'low' | 'high' | 'auto'; + + type: 'input_image'; + + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + export interface OpenAIResponseAnnotationFileCitation { + file_id: string; + + filename: string; + + index: number; + + type: 'file_citation'; + } + + export interface OpenAIResponseAnnotationCitation { + end_index: number; + + start_index: number; + + title: string; + + type: 'url_citation'; + + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } + + export interface Text { + /** + * Configuration for Responses API text format. + */ + format?: Text.Format; + } + + export namespace Text { + /** + * Configuration for Responses API text format. 
+ */ + export interface Format { + /** + * Must be "text", "json_schema", or "json_object" to identify the format type + */ + type: 'text' | 'json_schema' | 'json_object'; + + /** + * (Optional) A description of the response format. Only used for json_schema. + */ + description?: string; + + /** + * The name of the response format. Only used for json_schema. + */ + name?: string; + + /** + * The JSON schema the response should conform to. In a Python SDK, this is often a + * `pydantic` model. Only used for json_schema. + */ + schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) Whether to strictly enforce the JSON schema. If true, the response + * must match the schema exactly. Only used for json_schema. + */ + strict?: boolean; + } + } + + export interface OpenAIResponseInputToolWebSearch { + type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11'; + + search_context_size?: string; + } + + export interface OpenAIResponseInputToolFileSearch { + type: 'file_search'; + + vector_store_ids: Array; + + filters?: { [key: string]: boolean | number | string | Array | unknown | null }; + + max_num_results?: number; + + ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions; + } + + export namespace OpenAIResponseInputToolFileSearch { + export interface RankingOptions { + ranker?: string; + + score_threshold?: number; + } + } + + export interface OpenAIResponseInputToolFunction { + name: string; + + type: 'function'; + + description?: string; + + parameters?: { [key: string]: boolean | number | string | Array | unknown | null }; + + strict?: boolean; + } + + export interface OpenAIResponseInputToolMcp { + require_approval: 'always' | 'never' | OpenAIResponseInputToolMcp.ApprovalFilter; + + server_label: string; + + server_url: string; + + type: 'mcp'; + + allowed_tools?: Array | OpenAIResponseInputToolMcp.AllowedToolsFilter; + + headers?: { [key: string]: boolean | number | string | Array | 
unknown | null }; + } + + export namespace OpenAIResponseInputToolMcp { + export interface ApprovalFilter { + always?: Array; + + never?: Array; + } + + export interface AllowedToolsFilter { + tool_names?: Array; + } + } + + export type ResponseCreateParamsNonStreaming = ResponsesAPI.ResponseCreateParamsNonStreaming; + export type ResponseCreateParamsStreaming = ResponsesAPI.ResponseCreateParamsStreaming; +} + +export interface ResponseCreateParamsNonStreaming extends ResponseCreateParamsBase { + stream?: false; +} + +export interface ResponseCreateParamsStreaming extends ResponseCreateParamsBase { + stream: true; +} + +export interface ResponseListParams { + /** + * The ID of the last response to return. + */ + after?: string; + + /** + * The number of responses to return. + */ + limit?: number; + + /** + * The model to filter responses by. + */ + model?: string; + + /** + * The order to sort responses by when sorted by created_at ('asc' or 'desc'). + */ + order?: 'asc' | 'desc'; +} + +Responses.InputItems = InputItems; + +export declare namespace Responses { + export { + type ResponseObject as ResponseObject, + type ResponseObjectStream as ResponseObjectStream, + type ResponseListResponse as ResponseListResponse, + type ResponseCreateParams as ResponseCreateParams, + type ResponseCreateParamsNonStreaming as ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming as ResponseCreateParamsStreaming, + type ResponseListParams as ResponseListParams, + }; + + export { + InputItems as InputItems, + type InputItemListResponse as InputItemListResponse, + type InputItemListParams as InputItemListParams, + }; +} diff --git a/src/resources/routes.ts b/src/resources/routes.ts new file mode 100644 index 0000000..f5c533e --- /dev/null +++ b/src/resources/routes.ts @@ -0,0 +1,26 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../resource'; +import * as Core from '../core'; +import * as InspectAPI from './inspect'; + +export class Routes extends APIResource { + /** + * List all routes. + */ + list(options?: Core.RequestOptions): Core.APIPromise { + return ( + this._client.get('/v1/inspect/routes', options) as Core.APIPromise<{ data: RouteListResponse }> + )._thenUnwrap((obj) => obj.data); + } +} + +export interface ListRoutesResponse { + data: RouteListResponse; +} + +export type RouteListResponse = Array; + +export declare namespace Routes { + export { type ListRoutesResponse as ListRoutesResponse, type RouteListResponse as RouteListResponse }; +} diff --git a/src/resources/safety.ts b/src/resources/safety.ts index 98bef4a..f892930 100644 --- a/src/resources/safety.ts +++ b/src/resources/safety.ts @@ -1,40 +1,39 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../core/resource'; -import * as InferenceAPI from './inference'; -import { APIPromise } from '../core/api-promise'; -import { RequestOptions } from '../internal/request-options'; +import { APIResource } from '../resource'; +import * as Core from '../core'; +import * as Shared from './shared'; export class Safety extends APIResource { - runShield(body: SafetyRunShieldParams, options?: RequestOptions): APIPromise { + /** + * Run a shield. 
+ */ + runShield(body: SafetyRunShieldParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/safety/run-shield', { body, ...options }); } } -export interface SafetyViolation { - metadata: { [key: string]: boolean | number | string | Array | unknown | null }; - - violation_level: 'info' | 'warn' | 'error'; - - user_message?: string; -} - -export interface SafetyRunShieldResponse { - violation?: SafetyViolation; +export interface RunShieldResponse { + violation?: Shared.SafetyViolation; } export interface SafetyRunShieldParams { - messages: Array; - + /** + * The messages to run the shield on. + */ + messages: Array; + + /** + * The parameters of the shield. + */ params: { [key: string]: boolean | number | string | Array | unknown | null }; + /** + * The identifier of the shield to run. + */ shield_id: string; } export declare namespace Safety { - export { - type SafetyViolation as SafetyViolation, - type SafetyRunShieldResponse as SafetyRunShieldResponse, - type SafetyRunShieldParams as SafetyRunShieldParams, - }; + export { type RunShieldResponse as RunShieldResponse, type SafetyRunShieldParams as SafetyRunShieldParams }; } diff --git a/src/resources/scoring-functions.ts b/src/resources/scoring-functions.ts index 08e0a00..8037e65 100644 --- a/src/resources/scoring-functions.ts +++ b/src/resources/scoring-functions.ts @@ -1,89 +1,42 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../core/resource'; -import * as ScoringFunctionsAPI from './scoring-functions'; -import { APIPromise } from '../core/api-promise'; -import { buildHeaders } from '../internal/headers'; -import { RequestOptions } from '../internal/request-options'; -import { path } from '../internal/utils/path'; +import { APIResource } from '../resource'; +import * as Core from '../core'; +import * as Shared from './shared'; export class ScoringFunctions extends APIResource { - create(body: ScoringFunctionCreateParams, options?: RequestOptions): APIPromise { + /** + * Get a scoring function by its ID. + */ + retrieve(scoringFnId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/scoring-functions/${scoringFnId}`, options); + } + + /** + * List all scoring functions. + */ + list(options?: Core.RequestOptions): Core.APIPromise { + return ( + this._client.get('/v1/scoring-functions', options) as Core.APIPromise<{ + data: ScoringFunctionListResponse; + }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Register a scoring function. 
+ */ + register(body: ScoringFunctionRegisterParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/scoring-functions', { body, ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), + headers: { Accept: '*/*', ...options?.headers }, }); } - - retrieve(scoringFnID: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/scoring-functions/${scoringFnID}`, options); - } - - list(options?: RequestOptions): APIPromise { - return this._client.get('/v1/scoring-functions', options); - } } -export type AggregationFunctionType = - | 'average' - | 'weighted_average' - | 'median' - | 'categorical_count' - | 'accuracy'; - -export type ParamType = - | ParamType.StringType - | ParamType.NumberType - | ParamType.BooleanType - | ParamType.ArrayType - | ParamType.ObjectType - | ParamType.JsonType - | ParamType.UnionType - | ParamType.ChatCompletionInputType - | ParamType.CompletionInputType - | ParamType.AgentTurnInputType; - -export namespace ParamType { - export interface StringType { - type: 'string'; - } - - export interface NumberType { - type: 'number'; - } - - export interface BooleanType { - type: 'boolean'; - } - - export interface ArrayType { - type: 'array'; - } - - export interface ObjectType { - type: 'object'; - } - - export interface JsonType { - type: 'json'; - } - - export interface UnionType { - type: 'union'; - } - - export interface ChatCompletionInputType { - type: 'chat_completion_input'; - } - - export interface CompletionInputType { - type: 'completion_input'; - } - - export interface AgentTurnInputType { - type: 'agent_turn_input'; - } +export interface ListScoringFunctionsResponse { + data: ScoringFunctionListResponse; } export interface ScoringFn { @@ -93,7 +46,7 @@ export interface ScoringFn { provider_id: string; - return_type: ParamType; + return_type: Shared.ReturnType; type: 'scoring_function'; @@ -111,60 +64,76 @@ export type ScoringFnParams = export namespace 
ScoringFnParams { export interface LlmAsJudgeScoringFnParams { - aggregation_functions: Array; + aggregation_functions: Array< + 'average' | 'weighted_average' | 'median' | 'categorical_count' | 'accuracy' + >; judge_model: string; judge_score_regexes: Array; - type: ScoringFunctionsAPI.ScoringFnParamsType; + type: 'llm_as_judge'; prompt_template?: string; } export interface RegexParserScoringFnParams { - aggregation_functions: Array; + aggregation_functions: Array< + 'average' | 'weighted_average' | 'median' | 'categorical_count' | 'accuracy' + >; parsing_regexes: Array; - type: ScoringFunctionsAPI.ScoringFnParamsType; + type: 'regex_parser'; } export interface BasicScoringFnParams { - aggregation_functions: Array; + aggregation_functions: Array< + 'average' | 'weighted_average' | 'median' | 'categorical_count' | 'accuracy' + >; - type: ScoringFunctionsAPI.ScoringFnParamsType; + type: 'basic'; } } -export type ScoringFnParamsType = 'llm_as_judge' | 'regex_parser' | 'basic'; - -export interface ScoringFunctionListResponse { - data: Array; -} +export type ScoringFunctionListResponse = Array; -export interface ScoringFunctionCreateParams { +export interface ScoringFunctionRegisterParams { + /** + * The description of the scoring function. + */ description: string; - return_type: ParamType; + return_type: Shared.ReturnType; + /** + * The ID of the scoring function to register. + */ scoring_fn_id: string; + /** + * The parameters for the scoring function for benchmark eval, these can be + * overridden for app eval. + */ params?: ScoringFnParams; + /** + * The ID of the provider to use for the scoring function. + */ provider_id?: string; + /** + * The ID of the provider scoring function to use for the scoring function. 
+ */ provider_scoring_fn_id?: string; } export declare namespace ScoringFunctions { export { - type AggregationFunctionType as AggregationFunctionType, - type ParamType as ParamType, + type ListScoringFunctionsResponse as ListScoringFunctionsResponse, type ScoringFn as ScoringFn, type ScoringFnParams as ScoringFnParams, - type ScoringFnParamsType as ScoringFnParamsType, type ScoringFunctionListResponse as ScoringFunctionListResponse, - type ScoringFunctionCreateParams as ScoringFunctionCreateParams, + type ScoringFunctionRegisterParams as ScoringFunctionRegisterParams, }; } diff --git a/src/resources/scoring.ts b/src/resources/scoring.ts index 6c0bb5e..55652e3 100644 --- a/src/resources/scoring.ts +++ b/src/resources/scoring.ts @@ -1,19 +1,25 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../core/resource'; +import { APIResource } from '../resource'; +import * as Core from '../core'; import * as ScoringFunctionsAPI from './scoring-functions'; -import { APIPromise } from '../core/api-promise'; -import { RequestOptions } from '../internal/request-options'; +import * as Shared from './shared'; export class Scoring extends APIResource { /** * Score a list of rows. */ - score(body: ScoringScoreParams, options?: RequestOptions): APIPromise { + score(body: ScoringScoreParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/scoring/score', { body, ...options }); } - scoreBatch(body: ScoringScoreBatchParams, options?: RequestOptions): APIPromise { + /** + * Score a batch of rows. + */ + scoreBatch( + body: ScoringScoreBatchParams, + options?: Core.RequestOptions, + ): Core.APIPromise { return this._client.post('/v1/scoring/score-batch', { body, ...options }); } } @@ -25,49 +31,15 @@ export interface ScoringScoreResponse { /** * A map of scoring function name to ScoringResult. 
*/ - results: { [key: string]: ScoringScoreResponse.Results }; -} - -export namespace ScoringScoreResponse { - /** - * A scoring result for a single row. - */ - export interface Results { - /** - * Map of metric name to aggregated value - */ - aggregated_results: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * The scoring result for each row. Each row is a map of column name to value. - */ - score_rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - } + results: { [key: string]: Shared.ScoringResult }; } export interface ScoringScoreBatchResponse { - results: { [key: string]: ScoringScoreBatchResponse.Results }; + results: { [key: string]: Shared.ScoringResult }; dataset_id?: string; } -export namespace ScoringScoreBatchResponse { - /** - * A scoring result for a single row. - */ - export interface Results { - /** - * Map of metric name to aggregated value - */ - aggregated_results: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * The scoring result for each row. Each row is a map of column name to value. - */ - score_rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - } -} - export interface ScoringScoreParams { /** * The rows to score. @@ -81,10 +53,19 @@ export interface ScoringScoreParams { } export interface ScoringScoreBatchParams { + /** + * The ID of the dataset to score. + */ dataset_id: string; + /** + * Whether to save the results to a dataset. + */ save_results_dataset: boolean; + /** + * The scoring functions to use for the scoring. + */ scoring_functions: { [key: string]: ScoringFunctionsAPI.ScoringFnParams | null }; } diff --git a/src/resources/shared.ts b/src/resources/shared.ts new file mode 100644 index 0000000..ae3a208 --- /dev/null +++ b/src/resources/shared.ts @@ -0,0 +1,800 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import * as Shared from './shared'; +import * as InferenceAPI from './inference'; +import * as ToolRuntimeAPI from './tool-runtime/tool-runtime'; + +/** + * Configuration for an agent. + */ +export interface AgentConfig { + /** + * The system instructions for the agent + */ + instructions: string; + + /** + * The model identifier to use for the agent + */ + model: string; + + client_tools?: Array; + + /** + * Optional flag indicating whether session data has to be persisted + */ + enable_session_persistence?: boolean; + + input_shields?: Array; + + max_infer_iters?: number; + + /** + * Optional name for the agent, used in telemetry and identification + */ + name?: string; + + output_shields?: Array; + + /** + * Optional response format configuration + */ + response_format?: ResponseFormat; + + /** + * Sampling parameters. + */ + sampling_params?: SamplingParams; + + /** + * @deprecated Whether tool use is required or automatic. This is a hint to the + * model which may not be followed. It depends on the Instruction Following + * capabilities of the model. + */ + tool_choice?: 'auto' | 'required' | 'none'; + + /** + * Configuration for tool use. + */ + tool_config?: AgentConfig.ToolConfig; + + /** + * @deprecated Prompt format for calling custom / zero shot tools. + */ + tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; + + toolgroups?: Array; +} + +export namespace AgentConfig { + /** + * Configuration for tool use. + */ + export interface ToolConfig { + /** + * (Optional) Config for how to override the default system prompt. - + * `SystemMessageBehavior.append`: Appends the provided system message to the + * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default + * system prompt with the provided system message. The system message can include + * the string '{{function_definitions}}' to indicate where the function definitions + * should be inserted. 
+ */ + system_message_behavior?: 'append' | 'replace'; + + /** + * (Optional) Whether tool use is automatic, required, or none. Can also specify a + * tool name to use a specific tool. Defaults to ToolChoice.auto. + */ + tool_choice?: 'auto' | 'required' | 'none' | (string & {}); + + /** + * (Optional) Instructs the model how to format tool calls. By default, Llama Stack + * will attempt to use a format that is best adapted to the model. - + * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - + * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a + * tag. - `ToolPromptFormat.python_list`: The tool calls + * are output as Python syntax -- a list of function calls. + */ + tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; + } + + export interface AgentToolGroupWithArgs { + args: { [key: string]: boolean | number | string | Array | unknown | null }; + + name: string; + } +} + +export interface BatchCompletion { + batch: Array; +} + +/** + * Response from a chat completion request. + */ +export interface ChatCompletionResponse { + /** + * The complete response message + */ + completion_message: CompletionMessage; + + /** + * Optional log probabilities for generated tokens + */ + logprobs?: Array; + + metrics?: Array; +} + +export namespace ChatCompletionResponse { + export interface Metric { + metric: string; + + value: number; + + unit?: string; + } +} + +/** + * A message containing the model's (assistant) response in a chat conversation. + */ +export interface CompletionMessage { + /** + * The content of the model's response + */ + content: InterleavedContent; + + /** + * Must be "assistant" to identify this as the model's response + */ + role: 'assistant'; + + /** + * Reason why the model stopped generating. Options are: - + * `StopReason.end_of_turn`: The model finished generating the entire response. 
- + * `StopReason.end_of_message`: The model finished generating but generated a + * partial response -- usually, a tool call. The user may call the tool and + * continue the conversation with the tool's response. - + * `StopReason.out_of_tokens`: The model ran out of token budget. + */ + stop_reason: 'end_of_turn' | 'end_of_message' | 'out_of_tokens'; + + /** + * List of tool calls. Each tool call is a ToolCall object. + */ + tool_calls?: Array; +} + +export type ContentDelta = ContentDelta.TextDelta | ContentDelta.ImageDelta | ContentDelta.ToolCallDelta; + +export namespace ContentDelta { + export interface TextDelta { + text: string; + + type: 'text'; + } + + export interface ImageDelta { + image: string; + + type: 'image'; + } + + export interface ToolCallDelta { + parse_status: 'started' | 'in_progress' | 'failed' | 'succeeded'; + + tool_call: Shared.ToolCallOrString; + + type: 'tool_call'; + } +} + +/** + * A document to be used for document ingestion in the RAG Tool. + */ +export interface Document { + /** + * The content of the document. + */ + content: + | string + | Document.ImageContentItem + | Document.TextContentItem + | Array + | Document.URL; + + /** + * The unique identifier for the document. + */ + document_id: string; + + /** + * Additional metadata for the document. + */ + metadata: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * The MIME type of the document. + */ + mime_type?: string; +} + +export namespace Document { + /** + * A image content item + */ + export interface ImageContentItem { + /** + * Image as a base64 encoded string or an URL + */ + image: ImageContentItem.Image; + + /** + * Discriminator type of the content item. 
Always "image" + */ + type: 'image'; + } + + export namespace ImageContentItem { + /** + * Image as a base64 encoded string or an URL + */ + export interface Image { + /** + * base64 encoded image data as string + */ + data?: string; + + /** + * A URL of the image or data URL in the format of data:image/{type};base64,{data}. + * Note that URL could have length limits. + */ + url?: Image.URL; + } + + export namespace Image { + /** + * A URL of the image or data URL in the format of data:image/{type};base64,{data}. + * Note that URL could have length limits. + */ + export interface URL { + uri: string; + } + } + } + + /** + * A text content item + */ + export interface TextContentItem { + /** + * Text content + */ + text: string; + + /** + * Discriminator type of the content item. Always "text" + */ + type: 'text'; + } + + export interface URL { + uri: string; + } +} + +/** + * A image content item + */ +export type InterleavedContent = + | string + | InterleavedContent.ImageContentItem + | InterleavedContent.TextContentItem + | Array; + +export namespace InterleavedContent { + /** + * A image content item + */ + export interface ImageContentItem { + /** + * Image as a base64 encoded string or an URL + */ + image: ImageContentItem.Image; + + /** + * Discriminator type of the content item. Always "image" + */ + type: 'image'; + } + + export namespace ImageContentItem { + /** + * Image as a base64 encoded string or an URL + */ + export interface Image { + /** + * base64 encoded image data as string + */ + data?: string; + + /** + * A URL of the image or data URL in the format of data:image/{type};base64,{data}. + * Note that URL could have length limits. + */ + url?: Image.URL; + } + + export namespace Image { + /** + * A URL of the image or data URL in the format of data:image/{type};base64,{data}. + * Note that URL could have length limits. 
+ */ + export interface URL { + uri: string; + } + } + } + + /** + * A text content item + */ + export interface TextContentItem { + /** + * Text content + */ + text: string; + + /** + * Discriminator type of the content item. Always "text" + */ + type: 'text'; + } +} + +/** + * A image content item + */ +export type InterleavedContentItem = + | InterleavedContentItem.ImageContentItem + | InterleavedContentItem.TextContentItem; + +export namespace InterleavedContentItem { + /** + * A image content item + */ + export interface ImageContentItem { + /** + * Image as a base64 encoded string or an URL + */ + image: ImageContentItem.Image; + + /** + * Discriminator type of the content item. Always "image" + */ + type: 'image'; + } + + export namespace ImageContentItem { + /** + * Image as a base64 encoded string or an URL + */ + export interface Image { + /** + * base64 encoded image data as string + */ + data?: string; + + /** + * A URL of the image or data URL in the format of data:image/{type};base64,{data}. + * Note that URL could have length limits. + */ + url?: Image.URL; + } + + export namespace Image { + /** + * A URL of the image or data URL in the format of data:image/{type};base64,{data}. + * Note that URL could have length limits. + */ + export interface URL { + uri: string; + } + } + } + + /** + * A text content item + */ + export interface TextContentItem { + /** + * Text content + */ + text: string; + + /** + * Discriminator type of the content item. Always "text" + */ + type: 'text'; + } +} + +/** + * A message from the user in a chat conversation. 
+ */ +export type Message = UserMessage | SystemMessage | ToolResponseMessage | CompletionMessage; + +export type ParamType = + | ParamType.StringType + | ParamType.NumberType + | ParamType.BooleanType + | ParamType.ArrayType + | ParamType.ObjectType + | ParamType.JsonType + | ParamType.UnionType + | ParamType.ChatCompletionInputType + | ParamType.CompletionInputType + | ParamType.AgentTurnInputType; + +export namespace ParamType { + export interface StringType { + type: 'string'; + } + + export interface NumberType { + type: 'number'; + } + + export interface BooleanType { + type: 'boolean'; + } + + export interface ArrayType { + type: 'array'; + } + + export interface ObjectType { + type: 'object'; + } + + export interface JsonType { + type: 'json'; + } + + export interface UnionType { + type: 'union'; + } + + export interface ChatCompletionInputType { + type: 'chat_completion_input'; + } + + export interface CompletionInputType { + type: 'completion_input'; + } + + export interface AgentTurnInputType { + type: 'agent_turn_input'; + } +} + +/** + * Configuration for the RAG query generation. + */ +export interface QueryConfig { + /** + * Template for formatting each retrieved chunk in the context. Available + * placeholders: {index} (1-based chunk ordinal), {chunk.content} (chunk content + * string), {metadata} (chunk metadata dict). Default: "Result {index}\nContent: + * {chunk.content}\nMetadata: {metadata}\n" + */ + chunk_template: string; + + /** + * Maximum number of chunks to retrieve. + */ + max_chunks: number; + + /** + * Maximum number of tokens in the context. + */ + max_tokens_in_context: number; + + /** + * Configuration for the query generator. + */ + query_generator_config: QueryGeneratorConfig; + + /** + * Search mode for retrieval—either "vector", "keyword", or "hybrid". Default + * "vector". + */ + mode?: string; + + /** + * Configuration for the ranker to use in hybrid search. Defaults to RRF ranker. 
+ */ + ranker?: QueryConfig.RrfRanker | QueryConfig.WeightedRanker; +} + +export namespace QueryConfig { + /** + * Reciprocal Rank Fusion (RRF) ranker configuration. + */ + export interface RrfRanker { + /** + * The impact factor for RRF scoring. Higher values give more weight to + * higher-ranked results. Must be greater than 0. Default of 60 is from the + * original RRF paper (Cormack et al., 2009). + */ + impact_factor: number; + + /** + * The type of ranker, always "rrf" + */ + type: 'rrf'; + } + + /** + * Weighted ranker configuration that combines vector and keyword scores. + */ + export interface WeightedRanker { + /** + * Weight factor between 0 and 1. 0 means only use keyword scores, 1 means only use + * vector scores, values in between blend both scores. + */ + alpha: number; + + /** + * The type of ranker, always "weighted" + */ + type: 'weighted'; + } +} + +export type QueryGeneratorConfig = + | QueryGeneratorConfig.DefaultRagQueryGeneratorConfig + | QueryGeneratorConfig.LlmragQueryGeneratorConfig; + +export namespace QueryGeneratorConfig { + export interface DefaultRagQueryGeneratorConfig { + separator: string; + + type: 'default'; + } + + export interface LlmragQueryGeneratorConfig { + model: string; + + template: string; + + type: 'llm'; + } +} + +export interface QueryResult { + metadata: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * A image content item + */ + content?: InterleavedContent; +} + +/** + * Configuration for JSON schema-guided response generation. + */ +export type ResponseFormat = ResponseFormat.JsonSchemaResponseFormat | ResponseFormat.GrammarResponseFormat; + +export namespace ResponseFormat { + /** + * Configuration for JSON schema-guided response generation. + */ + export interface JsonSchemaResponseFormat { + /** + * The JSON schema the response should conform to. In a Python SDK, this is often a + * `pydantic` model. 
+ */ + json_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Must be "json_schema" to identify this format type + */ + type: 'json_schema'; + } + + /** + * Configuration for grammar-guided response generation. + */ + export interface GrammarResponseFormat { + /** + * The BNF grammar specification the response should conform to + */ + bnf: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Must be "grammar" to identify this format type + */ + type: 'grammar'; + } +} + +export interface ReturnType { + type: + | 'string' + | 'number' + | 'boolean' + | 'array' + | 'object' + | 'json' + | 'union' + | 'chat_completion_input' + | 'completion_input' + | 'agent_turn_input'; +} + +export interface SafetyViolation { + metadata: { [key: string]: boolean | number | string | Array | unknown | null }; + + violation_level: 'info' | 'warn' | 'error'; + + user_message?: string; +} + +/** + * Sampling parameters. + */ +export interface SamplingParams { + /** + * The sampling strategy. + */ + strategy: + | SamplingParams.GreedySamplingStrategy + | SamplingParams.TopPSamplingStrategy + | SamplingParams.TopKSamplingStrategy; + + /** + * The maximum number of tokens that can be generated in the completion. The token + * count of your prompt plus max_tokens cannot exceed the model's context length. + */ + max_tokens?: number; + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on + * whether they appear in the text so far, increasing the model's likelihood to + * talk about new topics. + */ + repetition_penalty?: number; + + /** + * Up to 4 sequences where the API will stop generating further tokens. The + * returned text will not contain the stop sequence. 
+ */ + stop?: Array; +} + +export namespace SamplingParams { + export interface GreedySamplingStrategy { + type: 'greedy'; + } + + export interface TopPSamplingStrategy { + type: 'top_p'; + + temperature?: number; + + top_p?: number; + } + + export interface TopKSamplingStrategy { + top_k: number; + + type: 'top_k'; + } +} + +/** + * A scoring result for a single row. + */ +export interface ScoringResult { + /** + * Map of metric name to aggregated value + */ + aggregated_results: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * The scoring result for each row. Each row is a map of column name to value. + */ + score_rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; +} + +/** + * A system message providing instructions or context to the model. + */ +export interface SystemMessage { + /** + * The content of the "system prompt". If multiple system messages are provided, + * they are concatenated. The underlying Llama Stack code may also add other system + * messages (for example, for formatting tool definitions). + */ + content: InterleavedContent; + + /** + * Must be "system" to identify this as a system message + */ + role: 'system'; +} + +export interface ToolCall { + arguments: + | string + | { + [key: string]: + | string + | number + | boolean + | Array + | { [key: string]: string | number | boolean | null } + | null; + }; + + call_id: string; + + tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); + + arguments_json?: string; +} + +export type ToolCallOrString = string | ToolCall; + +export interface ToolParamDefinition { + param_type: string; + + default?: boolean | number | string | Array | unknown | null; + + description?: string; + + required?: boolean; +} + +/** + * A message representing the result of a tool invocation. 
+ */ +export interface ToolResponseMessage { + /** + * Unique identifier for the tool call this response is for + */ + call_id: string; + + /** + * The response content from the tool + */ + content: InterleavedContent; + + /** + * Must be "tool" to identify this as a tool response + */ + role: 'tool'; +} + +/** + * A message from the user in a chat conversation. + */ +export interface UserMessage { + /** + * The content of the message, which can include text and other media + */ + content: InterleavedContent; + + /** + * Must be "user" to identify this as a user message + */ + role: 'user'; + + /** + * (Optional) This field is used internally by Llama Stack to pass RAG context. + * This field may be removed in the API in the future. + */ + context?: InterleavedContent; +} diff --git a/src/resources/shields.ts b/src/resources/shields.ts index 6135150..b53dfae 100644 --- a/src/resources/shields.ts +++ b/src/resources/shields.ts @@ -1,24 +1,37 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../core/resource'; -import { APIPromise } from '../core/api-promise'; -import { RequestOptions } from '../internal/request-options'; -import { path } from '../internal/utils/path'; +import { APIResource } from '../resource'; +import * as Core from '../core'; export class Shields extends APIResource { - create(body: ShieldCreateParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/shields', { body, ...options }); + /** + * Get a shield by its identifier. + */ + retrieve(identifier: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/shields/${identifier}`, options); } - retrieve(identifier: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/shields/${identifier}`, options); + /** + * List all shields. 
+ */ + list(options?: Core.RequestOptions): Core.APIPromise { + return ( + this._client.get('/v1/shields', options) as Core.APIPromise<{ data: ShieldListResponse }> + )._thenUnwrap((obj) => obj.data); } - list(options?: RequestOptions): APIPromise { - return this._client.get('/v1/shields', options); + /** + * Register a shield. + */ + register(body: ShieldRegisterParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/v1/shields', { body, ...options }); } } +export interface ListShieldsResponse { + data: ShieldListResponse; +} + /** * A safety shield resource that can be used to check content */ @@ -34,24 +47,35 @@ export interface Shield { provider_resource_id?: string; } -export interface ShieldListResponse { - data: Array; -} +export type ShieldListResponse = Array; -export interface ShieldCreateParams { +export interface ShieldRegisterParams { + /** + * The identifier of the shield to register. + */ shield_id: string; + /** + * The parameters of the shield. + */ params?: { [key: string]: boolean | number | string | Array | unknown | null }; + /** + * The identifier of the provider. + */ provider_id?: string; + /** + * The identifier of the shield in the provider. + */ provider_shield_id?: string; } export declare namespace Shields { export { + type ListShieldsResponse as ListShieldsResponse, type Shield as Shield, type ShieldListResponse as ShieldListResponse, - type ShieldCreateParams as ShieldCreateParams, + type ShieldRegisterParams as ShieldRegisterParams, }; } diff --git a/src/resources/synthetic-data-generation.ts b/src/resources/synthetic-data-generation.ts index bf98348..98c10a1 100644 --- a/src/resources/synthetic-data-generation.ts +++ b/src/resources/synthetic-data-generation.ts @@ -1,15 +1,14 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../core/resource'; -import * as InferenceAPI from './inference'; -import { APIPromise } from '../core/api-promise'; -import { RequestOptions } from '../internal/request-options'; +import { APIResource } from '../resource'; +import * as Core from '../core'; +import * as Shared from './shared'; export class SyntheticDataGeneration extends APIResource { generate( body: SyntheticDataGenerationGenerateParams, - options?: RequestOptions, - ): APIPromise { + options?: Core.RequestOptions, + ): Core.APIPromise { return this._client.post('/v1/synthetic-data-generation/generate', { body, ...options }); } } @@ -18,14 +17,14 @@ export class SyntheticDataGeneration extends APIResource { * Response from the synthetic data generation. Batch of (prompt, response, score) * tuples that pass the threshold. */ -export interface SyntheticDataGenerationGenerateResponse { +export interface SyntheticDataGenerationResponse { synthetic_data: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; statistics?: { [key: string]: boolean | number | string | Array | unknown | null }; } export interface SyntheticDataGenerationGenerateParams { - dialogs: Array; + dialogs: Array; /** * The type of filtering function. @@ -37,7 +36,7 @@ export interface SyntheticDataGenerationGenerateParams { export declare namespace SyntheticDataGeneration { export { - type SyntheticDataGenerationGenerateResponse as SyntheticDataGenerationGenerateResponse, + type SyntheticDataGenerationResponse as SyntheticDataGenerationResponse, type SyntheticDataGenerationGenerateParams as SyntheticDataGenerationGenerateParams, }; } diff --git a/src/resources/telemetry.ts b/src/resources/telemetry.ts index e474405..c1dea80 100644 --- a/src/resources/telemetry.ts +++ b/src/resources/telemetry.ts @@ -1,3 +1,344 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-export * from './telemetry/index'; +import { APIResource } from '../resource'; +import * as Core from '../core'; + +export class Telemetry extends APIResource { + /** + * Get a span by its ID. + */ + getSpan( + traceId: string, + spanId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1/telemetry/traces/${traceId}/spans/${spanId}`, options); + } + + /** + * Get a span tree by its ID. + */ + getSpanTree( + spanId: string, + body: TelemetryGetSpanTreeParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return ( + this._client.post(`/v1/telemetry/spans/${spanId}/tree`, { body, ...options }) as Core.APIPromise<{ + data: TelemetryGetSpanTreeResponse; + }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Get a trace by its ID. + */ + getTrace(traceId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/telemetry/traces/${traceId}`, options); + } + + /** + * Log an event. + */ + logEvent(body: TelemetryLogEventParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/v1/telemetry/events', { + body, + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); + } + + /** + * Query spans. + */ + querySpans( + body: TelemetryQuerySpansParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return ( + this._client.post('/v1/telemetry/spans', { body, ...options }) as Core.APIPromise<{ + data: TelemetryQuerySpansResponse; + }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Query traces. + */ + queryTraces( + body: TelemetryQueryTracesParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return ( + this._client.post('/v1/telemetry/traces', { body, ...options }) as Core.APIPromise<{ + data: TelemetryQueryTracesResponse; + }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Save spans to a dataset. 
+ */ + saveSpansToDataset( + body: TelemetrySaveSpansToDatasetParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post('/v1/telemetry/spans/export', { + body, + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); + } +} + +export type Event = Event.UnstructuredLogEvent | Event.MetricEvent | Event.StructuredLogEvent; + +export namespace Event { + export interface UnstructuredLogEvent { + message: string; + + severity: 'verbose' | 'debug' | 'info' | 'warn' | 'error' | 'critical'; + + span_id: string; + + timestamp: string; + + trace_id: string; + + type: 'unstructured_log'; + + attributes?: { [key: string]: string | number | boolean | null }; + } + + export interface MetricEvent { + metric: string; + + span_id: string; + + timestamp: string; + + trace_id: string; + + type: 'metric'; + + unit: string; + + value: number; + + attributes?: { [key: string]: string | number | boolean | null }; + } + + export interface StructuredLogEvent { + payload: StructuredLogEvent.SpanStartPayload | StructuredLogEvent.SpanEndPayload; + + span_id: string; + + timestamp: string; + + trace_id: string; + + type: 'structured_log'; + + attributes?: { [key: string]: string | number | boolean | null }; + } + + export namespace StructuredLogEvent { + export interface SpanStartPayload { + name: string; + + type: 'span_start'; + + parent_span_id?: string; + } + + export interface SpanEndPayload { + status: 'ok' | 'error'; + + type: 'span_end'; + } + } +} + +export interface QueryCondition { + key: string; + + op: 'eq' | 'ne' | 'gt' | 'lt'; + + value: boolean | number | string | Array | unknown | null; +} + +export interface QuerySpansResponse { + data: TelemetryQuerySpansResponse; +} + +export interface SpanWithStatus { + name: string; + + span_id: string; + + start_time: string; + + trace_id: string; + + attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; + + end_time?: string; + + parent_span_id?: string; + + 
status?: 'ok' | 'error'; +} + +export interface Trace { + root_span_id: string; + + start_time: string; + + trace_id: string; + + end_time?: string; +} + +export interface TelemetryGetSpanResponse { + name: string; + + span_id: string; + + start_time: string; + + trace_id: string; + + attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; + + end_time?: string; + + parent_span_id?: string; +} + +export type TelemetryGetSpanTreeResponse = { [key: string]: SpanWithStatus }; + +export type TelemetryQuerySpansResponse = Array; + +export namespace TelemetryQuerySpansResponse { + export interface TelemetryQuerySpansResponseItem { + name: string; + + span_id: string; + + start_time: string; + + trace_id: string; + + attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; + + end_time?: string; + + parent_span_id?: string; + } +} + +export type TelemetryQueryTracesResponse = Array; + +export interface TelemetryGetSpanTreeParams { + /** + * The attributes to return in the tree. + */ + attributes_to_return?: Array; + + /** + * The maximum depth of the tree. + */ + max_depth?: number; +} + +export interface TelemetryLogEventParams { + /** + * The event to log. + */ + event: Event; + + /** + * The time to live of the event. + */ + ttl_seconds: number; +} + +export interface TelemetryQuerySpansParams { + /** + * The attribute filters to apply to the spans. + */ + attribute_filters: Array; + + /** + * The attributes to return in the spans. + */ + attributes_to_return: Array; + + /** + * The maximum depth of the tree. + */ + max_depth?: number; +} + +export interface TelemetryQueryTracesParams { + /** + * The attribute filters to apply to the traces. + */ + attribute_filters?: Array; + + /** + * The limit of traces to return. + */ + limit?: number; + + /** + * The offset of the traces to return. + */ + offset?: number; + + /** + * The order by of the traces to return. 
+ */ + order_by?: Array; +} + +export interface TelemetrySaveSpansToDatasetParams { + /** + * The attribute filters to apply to the spans. + */ + attribute_filters: Array; + + /** + * The attributes to save to the dataset. + */ + attributes_to_save: Array; + + /** + * The ID of the dataset to save the spans to. + */ + dataset_id: string; + + /** + * The maximum depth of the tree. + */ + max_depth?: number; +} + +export declare namespace Telemetry { + export { + type Event as Event, + type QueryCondition as QueryCondition, + type QuerySpansResponse as QuerySpansResponse, + type SpanWithStatus as SpanWithStatus, + type Trace as Trace, + type TelemetryGetSpanResponse as TelemetryGetSpanResponse, + type TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse, + type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse, + type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse, + type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams, + type TelemetryLogEventParams as TelemetryLogEventParams, + type TelemetryQuerySpansParams as TelemetryQuerySpansParams, + type TelemetryQueryTracesParams as TelemetryQueryTracesParams, + type TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams, + }; +} diff --git a/src/resources/telemetry/index.ts b/src/resources/telemetry/index.ts deleted file mode 100644 index e589e43..0000000 --- a/src/resources/telemetry/index.ts +++ /dev/null @@ -1,25 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -export { - Spans, - type QueryCondition, - type SpanCreateResponse, - type SpanBuildTreeResponse, - type SpanCreateParams, - type SpanBuildTreeParams, - type SpanExportParams, -} from './spans'; -export { - Telemetry, - type EventType, - type StructuredLogType, - type TelemetryCreateEventParams, -} from './telemetry'; -export { - Traces, - type Span, - type Trace, - type TraceCreateResponse, - type TraceCreateParams, - type TraceRetrieveSpanParams, -} from './traces'; diff --git a/src/resources/telemetry/spans.ts b/src/resources/telemetry/spans.ts deleted file mode 100644 index 734170d..0000000 --- a/src/resources/telemetry/spans.ts +++ /dev/null @@ -1,101 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../../core/resource'; -import * as TracesAPI from './traces'; -import { APIPromise } from '../../core/api-promise'; -import { buildHeaders } from '../../internal/headers'; -import { RequestOptions } from '../../internal/request-options'; -import { path } from '../../internal/utils/path'; - -export class Spans extends APIResource { - create(body: SpanCreateParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/telemetry/spans', { body, ...options }); - } - - buildTree( - spanID: string, - body: SpanBuildTreeParams, - options?: RequestOptions, - ): APIPromise { - return this._client.post(path`/v1/telemetry/spans/${spanID}/tree`, { body, ...options }); - } - - export(body: SpanExportParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/telemetry/spans/export', { - body, - ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), - }); - } -} - -export interface QueryCondition { - key: string; - - op: 'eq' | 'ne' | 'gt' | 'lt'; - - value: boolean | number | string | Array | unknown | null; -} - -export interface SpanCreateResponse { - data: Array; -} - -export interface SpanBuildTreeResponse { - data: { [key: string]: 
SpanBuildTreeResponse.Data }; -} - -export namespace SpanBuildTreeResponse { - export interface Data { - name: string; - - span_id: string; - - start_time: string; - - trace_id: string; - - attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; - - end_time?: string; - - parent_span_id?: string; - - status?: 'ok' | 'error'; - } -} - -export interface SpanCreateParams { - attribute_filters: Array; - - attributes_to_return: Array; - - max_depth?: number; -} - -export interface SpanBuildTreeParams { - attributes_to_return?: Array; - - max_depth?: number; -} - -export interface SpanExportParams { - attribute_filters: Array; - - attributes_to_save: Array; - - dataset_id: string; - - max_depth?: number; -} - -export declare namespace Spans { - export { - type QueryCondition as QueryCondition, - type SpanCreateResponse as SpanCreateResponse, - type SpanBuildTreeResponse as SpanBuildTreeResponse, - type SpanCreateParams as SpanCreateParams, - type SpanBuildTreeParams as SpanBuildTreeParams, - type SpanExportParams as SpanExportParams, - }; -} diff --git a/src/resources/telemetry/telemetry.ts b/src/resources/telemetry/telemetry.ts deleted file mode 100644 index baccd7a..0000000 --- a/src/resources/telemetry/telemetry.ts +++ /dev/null @@ -1,148 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { APIResource } from '../../core/resource'; -import * as TelemetryAPI from './telemetry'; -import * as SpansAPI from './spans'; -import { - QueryCondition, - SpanBuildTreeParams, - SpanBuildTreeResponse, - SpanCreateParams, - SpanCreateResponse, - SpanExportParams, - Spans, -} from './spans'; -import * as TracesAPI from './traces'; -import { - Span, - Trace, - TraceCreateParams, - TraceCreateResponse, - TraceRetrieveSpanParams, - Traces, -} from './traces'; -import { APIPromise } from '../../core/api-promise'; -import { buildHeaders } from '../../internal/headers'; -import { RequestOptions } from '../../internal/request-options'; - -export class Telemetry extends APIResource { - traces: TracesAPI.Traces = new TracesAPI.Traces(this._client); - spans: SpansAPI.Spans = new SpansAPI.Spans(this._client); - - createEvent(body: TelemetryCreateEventParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/telemetry/events', { - body, - ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), - }); - } -} - -export type EventType = 'unstructured_log' | 'structured_log' | 'metric'; - -export type StructuredLogType = 'span_start' | 'span_end'; - -export interface TelemetryCreateEventParams { - event: - | TelemetryCreateEventParams.UnstructuredLogEvent - | TelemetryCreateEventParams.MetricEvent - | TelemetryCreateEventParams.StructuredLogEvent; - - ttl_seconds: number; -} - -export namespace TelemetryCreateEventParams { - export interface UnstructuredLogEvent { - message: string; - - severity: 'verbose' | 'debug' | 'info' | 'warn' | 'error' | 'critical'; - - span_id: string; - - timestamp: string; - - trace_id: string; - - type: TelemetryAPI.EventType; - - attributes?: { [key: string]: string | number | boolean | null }; - } - - export interface MetricEvent { - metric: string; - - span_id: string; - - timestamp: string; - - trace_id: string; - - type: TelemetryAPI.EventType; - - unit: string; - - value: number; - - 
attributes?: { [key: string]: string | number | boolean | null }; - } - - export interface StructuredLogEvent { - payload: StructuredLogEvent.SpanStartPayload | StructuredLogEvent.SpanEndPayload; - - span_id: string; - - timestamp: string; - - trace_id: string; - - type: TelemetryAPI.EventType; - - attributes?: { [key: string]: string | number | boolean | null }; - } - - export namespace StructuredLogEvent { - export interface SpanStartPayload { - name: string; - - type: TelemetryAPI.StructuredLogType; - - parent_span_id?: string; - } - - export interface SpanEndPayload { - status: 'ok' | 'error'; - - type: TelemetryAPI.StructuredLogType; - } - } -} - -Telemetry.Traces = Traces; -Telemetry.Spans = Spans; - -export declare namespace Telemetry { - export { - type EventType as EventType, - type StructuredLogType as StructuredLogType, - type TelemetryCreateEventParams as TelemetryCreateEventParams, - }; - - export { - Traces as Traces, - type Span as Span, - type Trace as Trace, - type TraceCreateResponse as TraceCreateResponse, - type TraceCreateParams as TraceCreateParams, - type TraceRetrieveSpanParams as TraceRetrieveSpanParams, - }; - - export { - Spans as Spans, - type QueryCondition as QueryCondition, - type SpanCreateResponse as SpanCreateResponse, - type SpanBuildTreeResponse as SpanBuildTreeResponse, - type SpanCreateParams as SpanCreateParams, - type SpanBuildTreeParams as SpanBuildTreeParams, - type SpanExportParams as SpanExportParams, - }; -} diff --git a/src/resources/telemetry/traces.ts b/src/resources/telemetry/traces.ts deleted file mode 100644 index b4bfbf8..0000000 --- a/src/resources/telemetry/traces.ts +++ /dev/null @@ -1,76 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { APIResource } from '../../core/resource'; -import * as SpansAPI from './spans'; -import { APIPromise } from '../../core/api-promise'; -import { RequestOptions } from '../../internal/request-options'; -import { path } from '../../internal/utils/path'; - -export class Traces extends APIResource { - create(body: TraceCreateParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/telemetry/traces', { body, ...options }); - } - - retrieveSpan(spanID: string, params: TraceRetrieveSpanParams, options?: RequestOptions): APIPromise { - const { trace_id } = params; - return this._client.get(path`/v1/telemetry/traces/${trace_id}/spans/${spanID}`, options); - } - - retrieveTrace(traceID: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/telemetry/traces/${traceID}`, options); - } -} - -export interface Span { - name: string; - - span_id: string; - - start_time: string; - - trace_id: string; - - attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; - - end_time?: string; - - parent_span_id?: string; -} - -export interface Trace { - root_span_id: string; - - start_time: string; - - trace_id: string; - - end_time?: string; -} - -export interface TraceCreateResponse { - data: Array; -} - -export interface TraceCreateParams { - attribute_filters?: Array; - - limit?: number; - - offset?: number; - - order_by?: Array; -} - -export interface TraceRetrieveSpanParams { - trace_id: string; -} - -export declare namespace Traces { - export { - type Span as Span, - type Trace as Trace, - type TraceCreateResponse as TraceCreateResponse, - type TraceCreateParams as TraceCreateParams, - type TraceRetrieveSpanParams as TraceRetrieveSpanParams, - }; -} diff --git a/src/resources/tool-runtime/index.ts b/src/resources/tool-runtime/index.ts index 1623d36..8297640 100644 --- a/src/resources/tool-runtime/index.ts +++ b/src/resources/tool-runtime/index.ts @@ -1,16 +1,10 @@ // File generated from our 
OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export { - RagTool, - type RagToolQueryContextResponse, - type RagToolInsertDocumentsParams, - type RagToolQueryContextParams, -} from './rag-tool'; +export { RagTool, type RagToolInsertParams, type RagToolQueryParams } from './rag-tool'; export { ToolRuntime, type ToolDef, - type URL, - type ToolRuntimeInvokeToolResponse, + type ToolInvocationResult, type ToolRuntimeListToolsResponse, type ToolRuntimeInvokeToolParams, type ToolRuntimeListToolsParams, diff --git a/src/resources/tool-runtime/rag-tool.ts b/src/resources/tool-runtime/rag-tool.ts index d05bc2b..9bcd4b2 100644 --- a/src/resources/tool-runtime/rag-tool.ts +++ b/src/resources/tool-runtime/rag-tool.ts @@ -1,177 +1,51 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../core/resource'; -import * as InferenceAPI from '../inference'; -import * as ToolRuntimeAPI from './tool-runtime'; -import { APIPromise } from '../../core/api-promise'; -import { buildHeaders } from '../../internal/headers'; -import { RequestOptions } from '../../internal/request-options'; +import { APIResource } from '../../resource'; +import * as Core from '../../core'; +import * as Shared from '../shared'; export class RagTool extends APIResource { /** * Index documents so they can be used by the RAG system */ - insertDocuments(body: RagToolInsertDocumentsParams, options?: RequestOptions): APIPromise { + insert(body: RagToolInsertParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/tool-runtime/rag-tool/insert', { body, ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), + headers: { Accept: '*/*', ...options?.headers }, }); } /** * Query the RAG system for context; typically invoked by the agent */ - queryContext( - body: RagToolQueryContextParams, - options?: RequestOptions, - ): APIPromise { + query(body: RagToolQueryParams, options?: 
Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/tool-runtime/rag-tool/query', { body, ...options }); } } -export interface RagToolQueryContextResponse { - metadata: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * A image content item - */ - content?: InferenceAPI.InterleavedContent; -} - -export interface RagToolInsertDocumentsParams { +export interface RagToolInsertParams { chunk_size_in_tokens: number; - documents: Array; + documents: Array; vector_db_id: string; } -export namespace RagToolInsertDocumentsParams { - /** - * A document to be used for document ingestion in the RAG Tool. - */ - export interface Document { - /** - * The content of the document. - */ - content: - | string - | Document.ImageContentItem - | Document.TextContentItem - | Array - | ToolRuntimeAPI.URL; - - /** - * The unique identifier for the document. - */ - document_id: string; - - /** - * Additional metadata for the document. - */ - metadata: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * The MIME type of the document. - */ - mime_type?: string; - } - - export namespace Document { - /** - * A image content item - */ - export interface ImageContentItem { - /** - * Image as a base64 encoded string or an URL - */ - image: ImageContentItem.Image; - - /** - * Discriminator type of the content item. Always "image" - */ - type: 'image'; - } - - export namespace ImageContentItem { - /** - * Image as a base64 encoded string or an URL - */ - export interface Image { - /** - * base64 encoded image data as string - */ - data?: string; - - /** - * A URL of the image or data URL in the format of data:image/{type};base64,{data}. - * Note that URL could have length limits. - */ - url?: ToolRuntimeAPI.URL; - } - } - - /** - * A text content item - */ - export interface TextContentItem { - /** - * Text content - */ - text: string; - - /** - * Discriminator type of the content item. 
Always "text" - */ - type: 'text'; - } - } -} - -export interface RagToolQueryContextParams { +export interface RagToolQueryParams { /** * A image content item */ - content: InferenceAPI.InterleavedContent; + content: Shared.InterleavedContent; vector_db_ids: Array; - query_config?: RagToolQueryContextParams.QueryConfig; -} - -export namespace RagToolQueryContextParams { - export interface QueryConfig { - max_chunks: number; - - max_tokens_in_context: number; - - query_generator_config: - | QueryConfig.DefaultRagQueryGeneratorConfig - | QueryConfig.LlmragQueryGeneratorConfig; - } - - export namespace QueryConfig { - export interface DefaultRagQueryGeneratorConfig { - separator: string; - - type: 'default'; - } - - export interface LlmragQueryGeneratorConfig { - model: string; - - template: string; - - type: 'llm'; - } - } + /** + * Configuration for the RAG query generation. + */ + query_config?: Shared.QueryConfig; } export declare namespace RagTool { - export { - type RagToolQueryContextResponse as RagToolQueryContextResponse, - type RagToolInsertDocumentsParams as RagToolInsertDocumentsParams, - type RagToolQueryContextParams as RagToolQueryContextParams, - }; + export { type RagToolInsertParams as RagToolInsertParams, type RagToolQueryParams as RagToolQueryParams }; } diff --git a/src/resources/tool-runtime/tool-runtime.ts b/src/resources/tool-runtime/tool-runtime.ts index 78fbf68..a49f8fd 100644 --- a/src/resources/tool-runtime/tool-runtime.ts +++ b/src/resources/tool-runtime/tool-runtime.ts @@ -1,36 +1,45 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../core/resource'; -import * as InferenceAPI from '../inference'; -import * as ToolsAPI from '../tools'; +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as Shared from '../shared'; import * as RagToolAPI from './rag-tool'; -import { - RagTool, - RagToolInsertDocumentsParams, - RagToolQueryContextParams, - RagToolQueryContextResponse, -} from './rag-tool'; -import { APIPromise } from '../../core/api-promise'; -import { RequestOptions } from '../../internal/request-options'; +import { RagTool, RagToolInsertParams, RagToolQueryParams } from './rag-tool'; export class ToolRuntime extends APIResource { ragTool: RagToolAPI.RagTool = new RagToolAPI.RagTool(this._client); /** - * Run a tool with the given arguments + * Run a tool with the given arguments. */ invokeTool( body: ToolRuntimeInvokeToolParams, - options?: RequestOptions, - ): APIPromise { + options?: Core.RequestOptions, + ): Core.APIPromise { return this._client.post('/v1/tool-runtime/invoke', { body, ...options }); } + /** + * List all tools in the runtime. 
+ */ + listTools( + query?: ToolRuntimeListToolsParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + listTools(options?: Core.RequestOptions): Core.APIPromise; listTools( - query: ToolRuntimeListToolsParams | null | undefined = {}, - options?: RequestOptions, - ): APIPromise { - return this._client.get('/v1/tool-runtime/list-tools', { query, ...options }); + query: ToolRuntimeListToolsParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.listTools({}, query); + } + return ( + this._client.get('/v1/tool-runtime/list-tools', { query, ...options }) as Core.APIPromise<{ + data: ToolRuntimeListToolsResponse; + }> + )._thenUnwrap((obj) => obj.data); } } @@ -41,18 +50,28 @@ export interface ToolDef { metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; - parameters?: Array; + parameters?: Array; } -export interface URL { - uri: string; +export namespace ToolDef { + export interface Parameter { + description: string; + + name: string; + + parameter_type: string; + + required: boolean; + + default?: boolean | number | string | Array | unknown | null; + } } -export interface ToolRuntimeInvokeToolResponse { +export interface ToolInvocationResult { /** * A image content item */ - content?: InferenceAPI.InterleavedContent; + content?: Shared.InterleavedContent; error_code?: number; @@ -61,29 +80,47 @@ export interface ToolRuntimeInvokeToolResponse { metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; } -export interface ToolRuntimeListToolsResponse { - data: Array; -} +export type ToolRuntimeListToolsResponse = Array; export interface ToolRuntimeInvokeToolParams { + /** + * A dictionary of arguments to pass to the tool. + */ kwargs: { [key: string]: boolean | number | string | Array | unknown | null }; + /** + * The name of the tool to invoke. 
+ */ tool_name: string; } export interface ToolRuntimeListToolsParams { - mcp_endpoint?: URL; + /** + * The MCP endpoint to use for the tool group. + */ + mcp_endpoint?: ToolRuntimeListToolsParams.McpEndpoint; + /** + * The ID of the tool group to list tools for. + */ tool_group_id?: string; } +export namespace ToolRuntimeListToolsParams { + /** + * The MCP endpoint to use for the tool group. + */ + export interface McpEndpoint { + uri: string; + } +} + ToolRuntime.RagTool = RagTool; export declare namespace ToolRuntime { export { type ToolDef as ToolDef, - type URL as URL, - type ToolRuntimeInvokeToolResponse as ToolRuntimeInvokeToolResponse, + type ToolInvocationResult as ToolInvocationResult, type ToolRuntimeListToolsResponse as ToolRuntimeListToolsResponse, type ToolRuntimeInvokeToolParams as ToolRuntimeInvokeToolParams, type ToolRuntimeListToolsParams as ToolRuntimeListToolsParams, @@ -91,8 +128,7 @@ export declare namespace ToolRuntime { export { RagTool as RagTool, - type RagToolQueryContextResponse as RagToolQueryContextResponse, - type RagToolInsertDocumentsParams as RagToolInsertDocumentsParams, - type RagToolQueryContextParams as RagToolQueryContextParams, + type RagToolInsertParams as RagToolInsertParams, + type RagToolQueryParams as RagToolQueryParams, }; } diff --git a/src/resources/toolgroups.ts b/src/resources/toolgroups.ts index 66aff4f..5560263 100644 --- a/src/resources/toolgroups.ts +++ b/src/resources/toolgroups.ts @@ -1,46 +1,51 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../core/resource'; -import * as ToolRuntimeAPI from './tool-runtime/tool-runtime'; -import { APIPromise } from '../core/api-promise'; -import { buildHeaders } from '../internal/headers'; -import { RequestOptions } from '../internal/request-options'; -import { path } from '../internal/utils/path'; +import { APIResource } from '../resource'; +import * as Core from '../core'; export class Toolgroups extends APIResource { - retrieve(toolgroupID: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/toolgroups/${toolgroupID}`, options); + /** + * List tool groups with optional provider. + */ + list(options?: Core.RequestOptions): Core.APIPromise { + return ( + this._client.get('/v1/toolgroups', options) as Core.APIPromise<{ data: ToolgroupListResponse }> + )._thenUnwrap((obj) => obj.data); } /** - * List tool groups with optional provider + * Get a tool group by its ID. */ - list(options?: RequestOptions): APIPromise { - return this._client.get('/v1/toolgroups', options); + get(toolgroupId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/toolgroups/${toolgroupId}`, options); } /** - * Register a tool group + * Register a tool group. */ - register(body: ToolgroupRegisterParams, options?: RequestOptions): APIPromise { + register(body: ToolgroupRegisterParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/toolgroups', { body, ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), + headers: { Accept: '*/*', ...options?.headers }, }); } /** - * Unregister a tool group + * Unregister a tool group. 
*/ - unregister(toolgroupID: string, options?: RequestOptions): APIPromise { - return this._client.delete(path`/v1/toolgroups/${toolgroupID}`, { + unregister(toolgroupId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1/toolgroups/${toolgroupId}`, { ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), + headers: { Accept: '*/*', ...options?.headers }, }); } } +export interface ListToolGroupsResponse { + data: ToolgroupListResponse; +} + export interface ToolGroup { identifier: string; @@ -50,27 +55,53 @@ export interface ToolGroup { args?: { [key: string]: boolean | number | string | Array | unknown | null }; - mcp_endpoint?: ToolRuntimeAPI.URL; + mcp_endpoint?: ToolGroup.McpEndpoint; provider_resource_id?: string; } -export interface ToolgroupListResponse { - data: Array; +export namespace ToolGroup { + export interface McpEndpoint { + uri: string; + } } +export type ToolgroupListResponse = Array; + export interface ToolgroupRegisterParams { + /** + * The ID of the provider to use for the tool group. + */ provider_id: string; + /** + * The ID of the tool group to register. + */ toolgroup_id: string; + /** + * A dictionary of arguments to pass to the tool group. + */ args?: { [key: string]: boolean | number | string | Array | unknown | null }; - mcp_endpoint?: ToolRuntimeAPI.URL; + /** + * The MCP endpoint to use for the tool group. + */ + mcp_endpoint?: ToolgroupRegisterParams.McpEndpoint; +} + +export namespace ToolgroupRegisterParams { + /** + * The MCP endpoint to use for the tool group. 
+ */ + export interface McpEndpoint { + uri: string; + } } export declare namespace Toolgroups { export { + type ListToolGroupsResponse as ListToolGroupsResponse, type ToolGroup as ToolGroup, type ToolgroupListResponse as ToolgroupListResponse, type ToolgroupRegisterParams as ToolgroupRegisterParams, diff --git a/src/resources/tools.ts b/src/resources/tools.ts index 0e4de4b..a40dab8 100644 --- a/src/resources/tools.ts +++ b/src/resources/tools.ts @@ -1,37 +1,48 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../core/resource'; -import { APIPromise } from '../core/api-promise'; -import { RequestOptions } from '../internal/request-options'; -import { path } from '../internal/utils/path'; +import { APIResource } from '../resource'; +import { isRequestOptions } from '../core'; +import * as Core from '../core'; export class Tools extends APIResource { - retrieve(toolName: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/tools/${toolName}`, options); + /** + * List tools with optional tool group. + */ + list(query?: ToolListParams, options?: Core.RequestOptions): Core.APIPromise; + list(options?: Core.RequestOptions): Core.APIPromise; + list( + query: ToolListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return ( + this._client.get('/v1/tools', { query, ...options }) as Core.APIPromise<{ data: ToolListResponse }> + )._thenUnwrap((obj) => obj.data); } /** - * List tools with optional tool group + * Get a tool by its name. 
*/ - list( - query: ToolListParams | null | undefined = {}, - options?: RequestOptions, - ): APIPromise { - return this._client.get('/v1/tools', { query, ...options }); + get(toolName: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/tools/${toolName}`, options); } } +export interface ListToolsResponse { + data: ToolListResponse; +} + export interface Tool { description: string; identifier: string; - parameters: Array; + parameters: Array; provider_id: string; - tool_host: 'distribution' | 'client' | 'model_context_protocol'; - toolgroup_id: string; type: 'tool'; @@ -41,30 +52,33 @@ export interface Tool { provider_resource_id?: string; } -export interface ToolParameter { - description: string; +export namespace Tool { + export interface Parameter { + description: string; - name: string; + name: string; - parameter_type: string; + parameter_type: string; - required: boolean; + required: boolean; - default?: boolean | number | string | Array | unknown | null; + default?: boolean | number | string | Array | unknown | null; + } } -export interface ToolListResponse { - data: Array; -} +export type ToolListResponse = Array; export interface ToolListParams { + /** + * The ID of the tool group to list tools for. + */ toolgroup_id?: string; } export declare namespace Tools { export { + type ListToolsResponse as ListToolsResponse, type Tool as Tool, - type ToolParameter as ToolParameter, type ToolListResponse as ToolListResponse, type ToolListParams as ToolListParams, }; diff --git a/src/resources/vector-dbs.ts b/src/resources/vector-dbs.ts index 5373c98..55644d0 100644 --- a/src/resources/vector-dbs.ts +++ b/src/resources/vector-dbs.ts @@ -1,33 +1,51 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../core/resource'; -import { APIPromise } from '../core/api-promise'; -import { buildHeaders } from '../internal/headers'; -import { RequestOptions } from '../internal/request-options'; -import { path } from '../internal/utils/path'; +import { APIResource } from '../resource'; +import * as Core from '../core'; export class VectorDBs extends APIResource { - create(body: VectorDBCreateParams, options?: RequestOptions): APIPromise { - return this._client.post('/v1/vector-dbs', { body, ...options }); + /** + * Get a vector database by its identifier. + */ + retrieve(vectorDBId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/vector-dbs/${vectorDBId}`, options); } - retrieve(vectorDBID: string, options?: RequestOptions): APIPromise { - return this._client.get(path`/v1/vector-dbs/${vectorDBID}`, options); + /** + * List all vector databases. + */ + list(options?: Core.RequestOptions): Core.APIPromise { + return ( + this._client.get('/v1/vector-dbs', options) as Core.APIPromise<{ data: VectorDBListResponse }> + )._thenUnwrap((obj) => obj.data); } - list(options?: RequestOptions): APIPromise { - return this._client.get('/v1/vector-dbs', options); + /** + * Register a vector database. + */ + register( + body: VectorDBRegisterParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post('/v1/vector-dbs', { body, ...options }); } - delete(vectorDBID: string, options?: RequestOptions): APIPromise { - return this._client.delete(path`/v1/vector-dbs/${vectorDBID}`, { + /** + * Unregister a vector database. 
+ */ + unregister(vectorDBId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1/vector-dbs/${vectorDBId}`, { ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), + headers: { Accept: '*/*', ...options?.headers }, }); } } -export interface VectorDB { +export interface ListVectorDBsResponse { + data: VectorDBListResponse; +} + +export interface VectorDBRetrieveResponse { embedding_dimension: number; embedding_model: string; @@ -41,26 +59,71 @@ export interface VectorDB { provider_resource_id?: string; } -export interface VectorDBListResponse { - data: Array; +export type VectorDBListResponse = Array; + +export namespace VectorDBListResponse { + export interface VectorDBListResponseItem { + embedding_dimension: number; + + embedding_model: string; + + identifier: string; + + provider_id: string; + + type: 'vector_db'; + + provider_resource_id?: string; + } +} + +export interface VectorDBRegisterResponse { + embedding_dimension: number; + + embedding_model: string; + + identifier: string; + + provider_id: string; + + type: 'vector_db'; + + provider_resource_id?: string; } -export interface VectorDBCreateParams { +export interface VectorDBRegisterParams { + /** + * The embedding model to use. + */ embedding_model: string; + /** + * The identifier of the vector database to register. + */ vector_db_id: string; + /** + * The dimension of the embedding model. + */ embedding_dimension?: number; + /** + * The identifier of the provider. + */ provider_id?: string; + /** + * The identifier of the vector database in the provider. 
+ */ provider_vector_db_id?: string; } export declare namespace VectorDBs { export { - type VectorDB as VectorDB, + type ListVectorDBsResponse as ListVectorDBsResponse, + type VectorDBRetrieveResponse as VectorDBRetrieveResponse, type VectorDBListResponse as VectorDBListResponse, - type VectorDBCreateParams as VectorDBCreateParams, + type VectorDBRegisterResponse as VectorDBRegisterResponse, + type VectorDBRegisterParams as VectorDBRegisterParams, }; } diff --git a/src/resources/vector-io.ts b/src/resources/vector-io.ts index 2f0dce4..d6e6c33 100644 --- a/src/resources/vector-io.ts +++ b/src/resources/vector-io.ts @@ -1,75 +1,274 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../core/resource'; -import * as InferenceAPI from './inference'; -import { APIPromise } from '../core/api-promise'; -import { buildHeaders } from '../internal/headers'; -import { RequestOptions } from '../internal/request-options'; +import { APIResource } from '../resource'; +import * as Core from '../core'; +import * as Shared from './shared'; export class VectorIo extends APIResource { - insert(body: VectorIoInsertParams, options?: RequestOptions): APIPromise { + /** + * Insert chunks into a vector database. + */ + insert(body: VectorIoInsertParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/vector-io/insert', { body, ...options, - headers: buildHeaders([{ Accept: '*/*' }, options?.headers]), + headers: { Accept: '*/*', ...options?.headers }, }); } - query(body: VectorIoQueryParams, options?: RequestOptions): APIPromise { + /** + * Query chunks from a vector database. 
+ */ + query(body: VectorIoQueryParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/vector-io/query', { body, ...options }); } } -export interface VectorIoQueryResponse { - chunks: Array; +export interface QueryChunksResponse { + chunks: Array; scores: Array; } -export namespace VectorIoQueryResponse { +export namespace QueryChunksResponse { + /** + * A chunk of content that can be inserted into a vector database. + */ export interface Chunk { /** - * A image content item + * The content of the chunk, which can be interleaved text, images, or other types. */ - content: InferenceAPI.InterleavedContent; + content: Shared.InterleavedContent; + /** + * Metadata associated with the chunk that will be used in the model context during + * inference. + */ metadata: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Metadata for the chunk that will NOT be used in the context during inference. + * The `chunk_metadata` is required backend functionality. + */ + chunk_metadata?: Chunk.ChunkMetadata; + + /** + * Optional embedding for the chunk. If not provided, it will be computed later. + */ + embedding?: Array; + + /** + * The chunk ID that is stored in the vector database. Used for backend + * functionality. + */ + stored_chunk_id?: string; + } + + export namespace Chunk { + /** + * Metadata for the chunk that will NOT be used in the context during inference. + * The `chunk_metadata` is required backend functionality. + */ + export interface ChunkMetadata { + /** + * The dimension of the embedding vector for the chunk. + */ + chunk_embedding_dimension?: number; + + /** + * The embedding model used to create the chunk's embedding. + */ + chunk_embedding_model?: string; + + /** + * The ID of the chunk. If not set, it will be generated based on the document ID + * and content. + */ + chunk_id?: string; + + /** + * The tokenizer used to create the chunk. Default is Tiktoken. 
+ */ + chunk_tokenizer?: string; + + /** + * The window of the chunk, which can be used to group related chunks together. + */ + chunk_window?: string; + + /** + * The number of tokens in the content of the chunk. + */ + content_token_count?: number; + + /** + * An optional timestamp indicating when the chunk was created. + */ + created_timestamp?: number; + + /** + * The ID of the document this chunk belongs to. + */ + document_id?: string; + + /** + * The number of tokens in the metadata of the chunk. + */ + metadata_token_count?: number; + + /** + * The source of the content, such as a URL, file path, or other identifier. + */ + source?: string; + + /** + * An optional timestamp indicating when the chunk was last updated. + */ + updated_timestamp?: number; + } } } export interface VectorIoInsertParams { + /** + * The chunks to insert. Each `Chunk` should contain content which can be + * interleaved text, images, or other types. `metadata`: `dict[str, Any]` and + * `embedding`: `List[float]` are optional. If `metadata` is provided, you + * configure how Llama Stack formats the chunk during generation. If `embedding` is + * not provided, it will be computed later. + */ chunks: Array; + /** + * The identifier of the vector database to insert the chunks into. + */ vector_db_id: string; + /** + * The time to live of the chunks. + */ ttl_seconds?: number; } export namespace VectorIoInsertParams { + /** + * A chunk of content that can be inserted into a vector database. + */ export interface Chunk { /** - * A image content item + * The content of the chunk, which can be interleaved text, images, or other types. */ - content: InferenceAPI.InterleavedContent; + content: Shared.InterleavedContent; + /** + * Metadata associated with the chunk that will be used in the model context during + * inference. 
+ */ metadata: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Metadata for the chunk that will NOT be used in the context during inference. + * The `chunk_metadata` is required backend functionality. + */ + chunk_metadata?: Chunk.ChunkMetadata; + + /** + * Optional embedding for the chunk. If not provided, it will be computed later. + */ + embedding?: Array; + + /** + * The chunk ID that is stored in the vector database. Used for backend + * functionality. + */ + stored_chunk_id?: string; + } + + export namespace Chunk { + /** + * Metadata for the chunk that will NOT be used in the context during inference. + * The `chunk_metadata` is required backend functionality. + */ + export interface ChunkMetadata { + /** + * The dimension of the embedding vector for the chunk. + */ + chunk_embedding_dimension?: number; + + /** + * The embedding model used to create the chunk's embedding. + */ + chunk_embedding_model?: string; + + /** + * The ID of the chunk. If not set, it will be generated based on the document ID + * and content. + */ + chunk_id?: string; + + /** + * The tokenizer used to create the chunk. Default is Tiktoken. + */ + chunk_tokenizer?: string; + + /** + * The window of the chunk, which can be used to group related chunks together. + */ + chunk_window?: string; + + /** + * The number of tokens in the content of the chunk. + */ + content_token_count?: number; + + /** + * An optional timestamp indicating when the chunk was created. + */ + created_timestamp?: number; + + /** + * The ID of the document this chunk belongs to. + */ + document_id?: string; + + /** + * The number of tokens in the metadata of the chunk. + */ + metadata_token_count?: number; + + /** + * The source of the content, such as a URL, file path, or other identifier. + */ + source?: string; + + /** + * An optional timestamp indicating when the chunk was last updated. 
+ */ + updated_timestamp?: number; + } } } export interface VectorIoQueryParams { /** - * A image content item + * The query to search for. */ - query: InferenceAPI.InterleavedContent; + query: Shared.InterleavedContent; + /** + * The identifier of the vector database to query. + */ vector_db_id: string; + /** + * The parameters of the query. + */ params?: { [key: string]: boolean | number | string | Array | unknown | null }; } export declare namespace VectorIo { export { - type VectorIoQueryResponse as VectorIoQueryResponse, + type QueryChunksResponse as QueryChunksResponse, type VectorIoInsertParams as VectorIoInsertParams, type VectorIoQueryParams as VectorIoQueryParams, }; diff --git a/src/resources/eval/benchmarks.ts b/src/resources/vector-stores.ts similarity index 69% rename from src/resources/eval/benchmarks.ts rename to src/resources/vector-stores.ts index f9ae077..e7a3431 100644 --- a/src/resources/eval/benchmarks.ts +++ b/src/resources/vector-stores.ts @@ -1,3 +1,3 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export * from './benchmarks/index'; +export * from './vector-stores/index'; diff --git a/src/resources/vector-stores/files.ts b/src/resources/vector-stores/files.ts new file mode 100644 index 0000000..c3c10be --- /dev/null +++ b/src/resources/vector-stores/files.ts @@ -0,0 +1,254 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; + +export class Files extends APIResource { + /** + * Attach a file to a vector store. + */ + create( + vectorStoreId: string, + body: FileCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/files`, { body, ...options }); + } + + /** + * Retrieves a vector store file. 
+ */ + retrieve( + vectorStoreId: string, + fileId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); + } + + /** + * Updates a vector store file. + */ + update( + vectorStoreId: string, + fileId: string, + body: FileUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, { + body, + ...options, + }); + } + + /** + * List files in a vector store. + */ + list( + vectorStoreId: string, + query?: FileListParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + list(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise; + list( + vectorStoreId: string, + query: FileListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.list(vectorStoreId, {}, query); + } + return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}/files`, { query, ...options }); + } + + /** + * Delete a vector store file. + */ + delete( + vectorStoreId: string, + fileId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.delete(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); + } + + /** + * Retrieves the contents of a vector store file. + */ + content( + vectorStoreId: string, + fileId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}/content`, options); + } +} + +/** + * OpenAI Vector Store File object. 
+ */ +export interface VectorStoreFile { + id: string; + + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + chunking_strategy: + | VectorStoreFile.VectorStoreChunkingStrategyAuto + | VectorStoreFile.VectorStoreChunkingStrategyStatic; + + created_at: number; + + object: string; + + status: 'completed' | 'in_progress' | 'cancelled' | 'failed'; + + usage_bytes: number; + + vector_store_id: string; + + last_error?: VectorStoreFile.LastError; +} + +export namespace VectorStoreFile { + export interface VectorStoreChunkingStrategyAuto { + type: 'auto'; + } + + export interface VectorStoreChunkingStrategyStatic { + static: VectorStoreChunkingStrategyStatic.Static; + + type: 'static'; + } + + export namespace VectorStoreChunkingStrategyStatic { + export interface Static { + chunk_overlap_tokens: number; + + max_chunk_size_tokens: number; + } + } + + export interface LastError { + code: 'server_error' | 'rate_limit_exceeded'; + + message: string; + } +} + +/** + * Response from listing vector stores. + */ +export interface FileListResponse { + data: Array; + + has_more: boolean; + + object: string; + + first_id?: string; + + last_id?: string; +} + +/** + * Response from deleting a vector store file. + */ +export interface FileDeleteResponse { + id: string; + + deleted: boolean; + + object: string; +} + +/** + * Response from retrieving the contents of a vector store file. + */ +export interface FileContentResponse { + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + content: Array; + + file_id: string; + + filename: string; +} + +export namespace FileContentResponse { + export interface Content { + text: string; + + type: 'text'; + } +} + +export interface FileCreateParams { + /** + * The ID of the file to attach to the vector store. + */ + file_id: string; + + /** + * The key-value attributes stored with the file, which can be used for filtering. 
+ */ + attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * The chunking strategy to use for the file. + */ + chunking_strategy?: + | FileCreateParams.VectorStoreChunkingStrategyAuto + | FileCreateParams.VectorStoreChunkingStrategyStatic; +} + +export namespace FileCreateParams { + export interface VectorStoreChunkingStrategyAuto { + type: 'auto'; + } + + export interface VectorStoreChunkingStrategyStatic { + static: VectorStoreChunkingStrategyStatic.Static; + + type: 'static'; + } + + export namespace VectorStoreChunkingStrategyStatic { + export interface Static { + chunk_overlap_tokens: number; + + max_chunk_size_tokens: number; + } + } +} + +export interface FileUpdateParams { + /** + * The updated key-value attributes to store with the file. + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; +} + +export interface FileListParams { + after?: string; + + before?: string; + + filter?: 'completed' | 'in_progress' | 'cancelled' | 'failed'; + + limit?: number; + + order?: string; +} + +export declare namespace Files { + export { + type VectorStoreFile as VectorStoreFile, + type FileListResponse as FileListResponse, + type FileDeleteResponse as FileDeleteResponse, + type FileContentResponse as FileContentResponse, + type FileCreateParams as FileCreateParams, + type FileUpdateParams as FileUpdateParams, + type FileListParams as FileListParams, + }; +} diff --git a/src/resources/vector-stores/index.ts b/src/resources/vector-stores/index.ts new file mode 100644 index 0000000..d4d883a --- /dev/null +++ b/src/resources/vector-stores/index.ts @@ -0,0 +1,23 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { + Files, + type VectorStoreFile, + type FileListResponse, + type FileDeleteResponse, + type FileContentResponse, + type FileCreateParams, + type FileUpdateParams, + type FileListParams, +} from './files'; +export { + VectorStores, + type ListVectorStoresResponse, + type VectorStore, + type VectorStoreDeleteResponse, + type VectorStoreSearchResponse, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, + type VectorStoreSearchParams, +} from './vector-stores'; diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts new file mode 100644 index 0000000..04efc15 --- /dev/null +++ b/src/resources/vector-stores/vector-stores.ts @@ -0,0 +1,350 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as FilesAPI from './files'; +import { + FileContentResponse, + FileCreateParams, + FileDeleteResponse, + FileListParams, + FileListResponse, + FileUpdateParams, + Files, + VectorStoreFile, +} from './files'; + +export class VectorStores extends APIResource { + files: FilesAPI.Files = new FilesAPI.Files(this._client); + + /** + * Creates a vector store. + */ + create(body: VectorStoreCreateParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/v1/openai/v1/vector_stores', { body, ...options }); + } + + /** + * Retrieves a vector store. + */ + retrieve(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}`, options); + } + + /** + * Updates a vector store. 
+   */
+  update(
+    vectorStoreId: string,
+    body: VectorStoreUpdateParams,
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<VectorStore> {
+    return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}`, { body, ...options });
+  }
+
+  /**
+   * Returns a list of vector stores.
+   */
+  list(
+    query?: VectorStoreListParams,
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<ListVectorStoresResponse>;
+  list(options?: Core.RequestOptions): Core.APIPromise<ListVectorStoresResponse>;
+  list(
+    query: VectorStoreListParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<ListVectorStoresResponse> {
+    if (isRequestOptions(query)) {
+      return this.list({}, query);
+    }
+    return this._client.get('/v1/openai/v1/vector_stores', { query, ...options });
+  }
+
+  /**
+   * Delete a vector store.
+   */
+  delete(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise<VectorStoreDeleteResponse> {
+    return this._client.delete(`/v1/openai/v1/vector_stores/${vectorStoreId}`, options);
+  }
+
+  /**
+   * Search for chunks in a vector store. Searches a vector store for relevant chunks
+   * based on a query and optional file attribute filters.
+   */
+  search(
+    vectorStoreId: string,
+    body: VectorStoreSearchParams,
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<VectorStoreSearchResponse> {
+    return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/search`, { body, ...options });
+  }
+}
+
+/**
+ * Response from listing vector stores.
+ */
+export interface ListVectorStoresResponse {
+  data: Array<VectorStore>;
+
+  has_more: boolean;
+
+  object: string;
+
+  first_id?: string;
+
+  last_id?: string;
+}
+
+/**
+ * OpenAI Vector Store object.
+ */ +export interface VectorStore { + id: string; + + created_at: number; + + file_counts: VectorStore.FileCounts; + + metadata: { [key: string]: boolean | number | string | Array | unknown | null }; + + object: string; + + status: string; + + usage_bytes: number; + + expires_after?: { [key: string]: boolean | number | string | Array | unknown | null }; + + expires_at?: number; + + last_active_at?: number; + + name?: string; +} + +export namespace VectorStore { + export interface FileCounts { + cancelled: number; + + completed: number; + + failed: number; + + in_progress: number; + + total: number; + } +} + +/** + * Response from deleting a vector store. + */ +export interface VectorStoreDeleteResponse { + id: string; + + deleted: boolean; + + object: string; +} + +/** + * Response from searching a vector store. + */ +export interface VectorStoreSearchResponse { + data: Array; + + has_more: boolean; + + object: string; + + search_query: string; + + next_page?: string; +} + +export namespace VectorStoreSearchResponse { + /** + * Response from searching a vector store. + */ + export interface Data { + content: Array; + + file_id: string; + + filename: string; + + score: number; + + attributes?: { [key: string]: string | number | boolean }; + } + + export namespace Data { + export interface Content { + text: string; + + type: 'text'; + } + } +} + +export interface VectorStoreCreateParams { + /** + * A name for the vector store. + */ + name: string; + + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. + */ + chunking_strategy?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * The dimension of the embedding vectors (default: 384). + */ + embedding_dimension?: number; + + /** + * The embedding model to use for this vector store. + */ + embedding_model?: string; + + /** + * The expiration policy for a vector store. 
+ */ + expires_after?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * A list of File IDs that the vector store should use. Useful for tools like + * `file_search` that can access files. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. + */ + metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * The ID of the provider to use for this vector store. + */ + provider_id?: string; + + /** + * The provider-specific vector database ID. + */ + provider_vector_db_id?: string; +} + +export interface VectorStoreUpdateParams { + /** + * The expiration policy for a vector store. + */ + expires_after?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Set of 16 key-value pairs that can be attached to an object. + */ + metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * The name of the vector store. + */ + name?: string; +} + +export interface VectorStoreListParams { + /** + * A cursor for use in pagination. `after` is an object ID that defines your place + * in the list. + */ + after?: string; + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place + * in the list. + */ + before?: string; + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and + * 100, and the default is 20. + */ + limit?: number; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending + * order and `desc` for descending order. + */ + order?: string; +} + +export interface VectorStoreSearchParams { + /** + * The query string or array for performing the search. + */ + query: string | Array; + + /** + * Filters based on file attributes to narrow the search results. 
+ */ + filters?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Maximum number of results to return (1 to 50 inclusive, default 10). + */ + max_num_results?: number; + + /** + * Ranking options for fine-tuning the search results. + */ + ranking_options?: VectorStoreSearchParams.RankingOptions; + + /** + * Whether to rewrite the natural language query for vector search (default false) + */ + rewrite_query?: boolean; + + /** + * The search mode to use - "keyword", "vector", or "hybrid" (default "vector") + */ + search_mode?: string; +} + +export namespace VectorStoreSearchParams { + /** + * Ranking options for fine-tuning the search results. + */ + export interface RankingOptions { + ranker?: string; + + score_threshold?: number; + } +} + +VectorStores.Files = Files; + +export declare namespace VectorStores { + export { + type ListVectorStoresResponse as ListVectorStoresResponse, + type VectorStore as VectorStore, + type VectorStoreDeleteResponse as VectorStoreDeleteResponse, + type VectorStoreSearchResponse as VectorStoreSearchResponse, + type VectorStoreCreateParams as VectorStoreCreateParams, + type VectorStoreUpdateParams as VectorStoreUpdateParams, + type VectorStoreListParams as VectorStoreListParams, + type VectorStoreSearchParams as VectorStoreSearchParams, + }; + + export { + Files as Files, + type VectorStoreFile as VectorStoreFile, + type FileListResponse as FileListResponse, + type FileDeleteResponse as FileDeleteResponse, + type FileContentResponse as FileContentResponse, + type FileCreateParams as FileCreateParams, + type FileUpdateParams as FileUpdateParams, + type FileListParams as FileListParams, + }; +} diff --git a/src/resources/version.ts b/src/resources/version.ts deleted file mode 100644 index f2f4b07..0000000 --- a/src/resources/version.ts +++ /dev/null @@ -1,19 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { APIResource } from '../core/resource'; -import { APIPromise } from '../core/api-promise'; -import { RequestOptions } from '../internal/request-options'; - -export class Version extends APIResource { - retrieve(options?: RequestOptions): APIPromise { - return this._client.get('/v1/version', options); - } -} - -export interface VersionRetrieveResponse { - version: string; -} - -export declare namespace Version { - export { type VersionRetrieveResponse as VersionRetrieveResponse }; -} diff --git a/src/shims/node.ts b/src/shims/node.ts new file mode 100644 index 0000000..73df560 --- /dev/null +++ b/src/shims/node.ts @@ -0,0 +1,50 @@ +// @ts-ignore +import * as types from '../_shims/node-types'; +import { setShims } from '../_shims/registry'; +import { getRuntime } from '../_shims/node-runtime'; +setShims(getRuntime()); + +declare module '../_shims/manual-types' { + export namespace manual { + // @ts-ignore + export type Agent = types.Agent; + // @ts-ignore + export import fetch = types.fetch; + // @ts-ignore + export type Request = types.Request; + // @ts-ignore + export type RequestInfo = types.RequestInfo; + // @ts-ignore + export type RequestInit = types.RequestInit; + // @ts-ignore + export type Response = types.Response; + // @ts-ignore + export type ResponseInit = types.ResponseInit; + // @ts-ignore + export type ResponseType = types.ResponseType; + // @ts-ignore + export type BodyInit = types.BodyInit; + // @ts-ignore + export type Headers = types.Headers; + // @ts-ignore + export type HeadersInit = types.HeadersInit; + // @ts-ignore + export type BlobPropertyBag = types.BlobPropertyBag; + // @ts-ignore + export type FilePropertyBag = types.FilePropertyBag; + // @ts-ignore + export type FileFromPathOptions = types.FileFromPathOptions; + // @ts-ignore + export import FormData = types.FormData; + // @ts-ignore + export import File = types.File; + // @ts-ignore + export import Blob = types.Blob; + // @ts-ignore + export type Readable = types.Readable; + 
// @ts-ignore + export type FsReadStream = types.FsReadStream; + // @ts-ignore + export import ReadableStream = types.ReadableStream; + } +} diff --git a/src/shims/web.ts b/src/shims/web.ts new file mode 100644 index 0000000..f72d784 --- /dev/null +++ b/src/shims/web.ts @@ -0,0 +1,50 @@ +// @ts-ignore +import * as types from '../_shims/web-types'; +import { setShims } from '../_shims/registry'; +import { getRuntime } from '../_shims/web-runtime'; +setShims(getRuntime({ manuallyImported: true })); + +declare module '../_shims/manual-types' { + export namespace manual { + // @ts-ignore + export type Agent = types.Agent; + // @ts-ignore + export import fetch = types.fetch; + // @ts-ignore + export type Request = types.Request; + // @ts-ignore + export type RequestInfo = types.RequestInfo; + // @ts-ignore + export type RequestInit = types.RequestInit; + // @ts-ignore + export type Response = types.Response; + // @ts-ignore + export type ResponseInit = types.ResponseInit; + // @ts-ignore + export type ResponseType = types.ResponseType; + // @ts-ignore + export type BodyInit = types.BodyInit; + // @ts-ignore + export type Headers = types.Headers; + // @ts-ignore + export type HeadersInit = types.HeadersInit; + // @ts-ignore + export type BlobPropertyBag = types.BlobPropertyBag; + // @ts-ignore + export type FilePropertyBag = types.FilePropertyBag; + // @ts-ignore + export type FileFromPathOptions = types.FileFromPathOptions; + // @ts-ignore + export import FormData = types.FormData; + // @ts-ignore + export import File = types.File; + // @ts-ignore + export import Blob = types.Blob; + // @ts-ignore + export type Readable = types.Readable; + // @ts-ignore + export type FsReadStream = types.FsReadStream; + // @ts-ignore + export import ReadableStream = types.ReadableStream; + } +} diff --git a/src/streaming.ts b/src/streaming.ts new file mode 100644 index 0000000..e3bc8de --- /dev/null +++ b/src/streaming.ts @@ -0,0 +1,291 @@ +import { ReadableStream, type Response } from 
'./_shims/index';
+import { LlamaStackClientError } from './error';
+import { findDoubleNewlineIndex, LineDecoder } from './internal/decoders/line';
+import { ReadableStreamToAsyncIterable } from './internal/stream-utils';
+
+type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined;
+
+export type ServerSentEvent = {
+  event: string | null;
+  data: string;
+  raw: string[];
+};
+
+export class Stream<Item> implements AsyncIterable<Item> {
+  controller: AbortController;
+
+  constructor(
+    private iterator: () => AsyncIterator<Item>,
+    controller: AbortController,
+  ) {
+    this.controller = controller;
+  }
+
+  static fromSSEResponse<Item>(response: Response, controller: AbortController): Stream<Item> {
+    let consumed = false;
+
+    async function* iterator(): AsyncIterator<Item, any, undefined> {
+      if (consumed) {
+        throw new Error('Cannot iterate over a consumed stream, use `.tee()` to split the stream.');
+      }
+      consumed = true;
+      let done = false;
+      try {
+        for await (const sse of _iterSSEMessages(response, controller)) {
+          try {
+            yield JSON.parse(sse.data);
+          } catch (e) {
+            console.error(`Could not parse message into JSON:`, sse.data);
+            console.error(`From chunk:`, sse.raw);
+            throw e;
+          }
+        }
+        done = true;
+      } catch (e) {
+        // If the user calls `stream.controller.abort()`, we should exit without throwing.
+        if (e instanceof Error && e.name === 'AbortError') return;
+        throw e;
+      } finally {
+        // If the user `break`s, abort the ongoing request.
+        if (!done) controller.abort();
+      }
+    }
+
+    return new Stream(iterator, controller);
+  }
+
+  /**
+   * Generates a Stream from a newline-separated ReadableStream
+   * where each item is a JSON value.
+   */
+  static fromReadableStream<Item>(readableStream: ReadableStream, controller: AbortController): Stream<Item> {
+    let consumed = false;
+
+    async function* iterLines(): AsyncGenerator<string, void, unknown> {
+      const lineDecoder = new LineDecoder();
+
+      const iter = ReadableStreamToAsyncIterable<Bytes>(readableStream);
+      for await (const chunk of iter) {
+        for (const line of lineDecoder.decode(chunk)) {
+          yield line;
+        }
+      }
+
+      for (const line of lineDecoder.flush()) {
+        yield line;
+      }
+    }
+
+    async function* iterator(): AsyncIterator<Item, any, undefined> {
+      if (consumed) {
+        throw new Error('Cannot iterate over a consumed stream, use `.tee()` to split the stream.');
+      }
+      consumed = true;
+      let done = false;
+      try {
+        for await (const line of iterLines()) {
+          if (done) continue;
+          if (line) yield JSON.parse(line);
+        }
+        done = true;
+      } catch (e) {
+        // If the user calls `stream.controller.abort()`, we should exit without throwing.
+        if (e instanceof Error && e.name === 'AbortError') return;
+        throw e;
+      } finally {
+        // If the user `break`s, abort the ongoing request.
+        if (!done) controller.abort();
+      }
+    }
+
+    return new Stream(iterator, controller);
+  }
+
+  [Symbol.asyncIterator](): AsyncIterator<Item> {
+    return this.iterator();
+  }
+
+  /**
+   * Splits the stream into two streams which can be
+   * independently read from at different speeds.
+   */
+  tee(): [Stream<Item>, Stream<Item>] {
+    const left: Array<Promise<IteratorResult<Item>>> = [];
+    const right: Array<Promise<IteratorResult<Item>>> = [];
+    const iterator = this.iterator();
+
+    const teeIterator = (queue: Array<Promise<IteratorResult<Item>>>): AsyncIterator<Item> => {
+      return {
+        next: () => {
+          if (queue.length === 0) {
+            const result = iterator.next();
+            left.push(result);
+            right.push(result);
+          }
+          return queue.shift()!;
+        },
+      };
+    };
+
+    return [
+      new Stream(() => teeIterator(left), this.controller),
+      new Stream(() => teeIterator(right), this.controller),
+    ];
+  }
+
+  /**
+   * Converts this stream to a newline-separated ReadableStream of
+   * JSON stringified values in the stream
+   * which can be turned back into a Stream with `Stream.fromReadableStream()`.
+   */
+  toReadableStream(): ReadableStream {
+    const self = this;
+    let iter: AsyncIterator<Item>;
+    const encoder = new TextEncoder();
+
+    return new ReadableStream({
+      async start() {
+        iter = self[Symbol.asyncIterator]();
+      },
+      async pull(ctrl: any) {
+        try {
+          const { value, done } = await iter.next();
+          if (done) return ctrl.close();
+
+          const bytes = encoder.encode(JSON.stringify(value) + '\n');
+
+          ctrl.enqueue(bytes);
+        } catch (err) {
+          ctrl.error(err);
+        }
+      },
+      async cancel() {
+        await iter.return?.();
+      },
+    });
+  }
+}
+
+export async function* _iterSSEMessages(
+  response: Response,
+  controller: AbortController,
+): AsyncGenerator<ServerSentEvent, void, unknown> {
+  if (!response.body) {
+    controller.abort();
+    throw new LlamaStackClientError(`Attempted to iterate over a response with no body`);
+  }
+
+  const sseDecoder = new SSEDecoder();
+  const lineDecoder = new LineDecoder();
+
+  const iter = ReadableStreamToAsyncIterable<Bytes>(response.body);
+  for await (const sseChunk of iterSSEChunks(iter)) {
+    for (const line of lineDecoder.decode(sseChunk)) {
+      const sse = sseDecoder.decode(line);
+      if (sse) yield sse;
+    }
+  }
+
+  for (const line of lineDecoder.flush()) {
+    const sse = sseDecoder.decode(line);
+    if (sse) yield sse;
+  }
+}
+
+/**
+ * Given an async iterable iterator, iterates over it and yields full
+ * SSE chunks, i.e. yields when a double new-line is encountered.
+ */
+async function* iterSSEChunks(iterator: AsyncIterableIterator<Bytes>): AsyncGenerator<Uint8Array> {
+  let data = new Uint8Array();
+
+  for await (const chunk of iterator) {
+    if (chunk == null) {
+      continue;
+    }
+
+    const binaryChunk =
+      chunk instanceof ArrayBuffer ? new Uint8Array(chunk)
+      : typeof chunk === 'string' ?
new TextEncoder().encode(chunk) + : chunk; + + let newData = new Uint8Array(data.length + binaryChunk.length); + newData.set(data); + newData.set(binaryChunk, data.length); + data = newData; + + let patternIndex; + while ((patternIndex = findDoubleNewlineIndex(data)) !== -1) { + yield data.slice(0, patternIndex); + data = data.slice(patternIndex); + } + } + + if (data.length > 0) { + yield data; + } +} + +class SSEDecoder { + private data: string[]; + private event: string | null; + private chunks: string[]; + + constructor() { + this.event = null; + this.data = []; + this.chunks = []; + } + + decode(line: string) { + if (line.endsWith('\r')) { + line = line.substring(0, line.length - 1); + } + + if (!line) { + // empty line and we didn't previously encounter any messages + if (!this.event && !this.data.length) return null; + + const sse: ServerSentEvent = { + event: this.event, + data: this.data.join('\n'), + raw: this.chunks, + }; + + this.event = null; + this.data = []; + this.chunks = []; + + return sse; + } + + this.chunks.push(line); + + if (line.startsWith(':')) { + return null; + } + + let [fieldname, _, value] = partition(line, ':'); + + if (value.startsWith(' ')) { + value = value.substring(1); + } + + if (fieldname === 'event') { + this.event = value; + } else if (fieldname === 'data') { + this.data.push(value); + } + + return null; + } +} + +function partition(str: string, delimiter: string): [string, string, string] { + const index = str.indexOf(delimiter); + if (index !== -1) { + return [str.substring(0, index), delimiter, str.substring(index + delimiter.length)]; + } + + return [str, '', '']; +} diff --git a/src/uploads.ts b/src/uploads.ts index b2ef647..8fd2154 100644 --- a/src/uploads.ts +++ b/src/uploads.ts @@ -1,2 +1,255 @@ -/** @deprecated Import from ./core/uploads instead */ -export * from './core/uploads'; +import { type RequestOptions } from './core'; +import { + FormData, + File, + type Blob, + type FilePropertyBag, + 
getMultipartRequestOptions, + type FsReadStream, + isFsReadStream, +} from './_shims/index'; +import { MultipartBody } from './_shims/MultipartBody'; +export { fileFromPath } from './_shims/index'; + +type BlobLikePart = string | ArrayBuffer | ArrayBufferView | BlobLike | Uint8Array | DataView; +export type BlobPart = string | ArrayBuffer | ArrayBufferView | Blob | Uint8Array | DataView; + +/** + * Typically, this is a native "File" class. + * + * We provide the {@link toFile} utility to convert a variety of objects + * into the File class. + * + * For convenience, you can also pass a fetch Response, or in Node, + * the result of fs.createReadStream(). + */ +export type Uploadable = FileLike | ResponseLike | FsReadStream; + +/** + * Intended to match web.Blob, node.Blob, node-fetch.Blob, etc. + */ +export interface BlobLike { + /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/size) */ + readonly size: number; + /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/type) */ + readonly type: string; + /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/text) */ + text(): Promise; + /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/slice) */ + slice(start?: number, end?: number): BlobLike; + // unfortunately @types/node-fetch@^2.6.4 doesn't type the arrayBuffer method +} + +/** + * Intended to match web.File, node.File, node-fetch.File, etc. + */ +export interface FileLike extends BlobLike { + /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/lastModified) */ + readonly lastModified: number; + /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/name) */ + readonly name: string; +} + +/** + * Intended to match web.Response, node.Response, node-fetch.Response, etc. 
+ */ +export interface ResponseLike { + url: string; + blob(): Promise; +} + +export const isResponseLike = (value: any): value is ResponseLike => + value != null && + typeof value === 'object' && + typeof value.url === 'string' && + typeof value.blob === 'function'; + +export const isFileLike = (value: any): value is FileLike => + value != null && + typeof value === 'object' && + typeof value.name === 'string' && + typeof value.lastModified === 'number' && + isBlobLike(value); + +/** + * The BlobLike type omits arrayBuffer() because @types/node-fetch@^2.6.4 lacks it; but this check + * adds the arrayBuffer() method type because it is available and used at runtime + */ +export const isBlobLike = (value: any): value is BlobLike & { arrayBuffer(): Promise } => + value != null && + typeof value === 'object' && + typeof value.size === 'number' && + typeof value.type === 'string' && + typeof value.text === 'function' && + typeof value.slice === 'function' && + typeof value.arrayBuffer === 'function'; + +export const isUploadable = (value: any): value is Uploadable => { + return isFileLike(value) || isResponseLike(value) || isFsReadStream(value); +}; + +export type ToFileInput = Uploadable | Exclude | AsyncIterable; + +/** + * Helper for creating a {@link File} to pass to an SDK upload method from a variety of different data formats + * @param value the raw content of the file. Can be an {@link Uploadable}, {@link BlobLikePart}, or {@link AsyncIterable} of {@link BlobLikePart}s + * @param {string=} name the name of the file. 
If omitted, toFile will try to determine a file name from bits if possible + * @param {Object=} options additional properties + * @param {string=} options.type the MIME type of the content + * @param {number=} options.lastModified the last modified timestamp + * @returns a {@link File} with the given properties + */ +export async function toFile( + value: ToFileInput | PromiseLike, + name?: string | null | undefined, + options?: FilePropertyBag | undefined, +): Promise { + // If it's a promise, resolve it. + value = await value; + + // If we've been given a `File` we don't need to do anything + if (isFileLike(value)) { + return value; + } + + if (isResponseLike(value)) { + const blob = await value.blob(); + name ||= new URL(value.url).pathname.split(/[\\/]/).pop() ?? 'unknown_file'; + + // we need to convert the `Blob` into an array buffer because the `Blob` class + // that `node-fetch` defines is incompatible with the web standard which results + // in `new File` interpreting it as a string instead of binary data. + const data = isBlobLike(blob) ? [(await blob.arrayBuffer()) as any] : [blob]; + + return new File(data, name, options); + } + + const bits = await getBytes(value); + + name ||= getName(value) ?? 'unknown_file'; + + if (!options?.type) { + const type = (bits[0] as any)?.type; + if (typeof type === 'string') { + options = { ...options, type }; + } + } + + return new File(bits, name, options); +} + +async function getBytes(value: ToFileInput): Promise> { + let parts: Array = []; + if ( + typeof value === 'string' || + ArrayBuffer.isView(value) || // includes Uint8Array, Buffer, etc. + value instanceof ArrayBuffer + ) { + parts.push(value); + } else if (isBlobLike(value)) { + parts.push(await value.arrayBuffer()); + } else if ( + isAsyncIterableIterator(value) // includes Readable, ReadableStream, etc. + ) { + for await (const chunk of value) { + parts.push(chunk as BlobPart); // TODO, consider validating? 
+ } + } else { + throw new Error( + `Unexpected data type: ${typeof value}; constructor: ${value?.constructor + ?.name}; props: ${propsForError(value)}`, + ); + } + + return parts; +} + +function propsForError(value: any): string { + const props = Object.getOwnPropertyNames(value); + return `[${props.map((p) => `"${p}"`).join(', ')}]`; +} + +function getName(value: any): string | undefined { + return ( + getStringFromMaybeBuffer(value.name) || + getStringFromMaybeBuffer(value.filename) || + // For fs.ReadStream + getStringFromMaybeBuffer(value.path)?.split(/[\\/]/).pop() + ); +} + +const getStringFromMaybeBuffer = (x: string | Buffer | unknown): string | undefined => { + if (typeof x === 'string') return x; + if (typeof Buffer !== 'undefined' && x instanceof Buffer) return String(x); + return undefined; +}; + +const isAsyncIterableIterator = (value: any): value is AsyncIterableIterator => + value != null && typeof value === 'object' && typeof value[Symbol.asyncIterator] === 'function'; + +export const isMultipartBody = (body: any): body is MultipartBody => + body && typeof body === 'object' && body.body && body[Symbol.toStringTag] === 'MultipartBody'; + +/** + * Returns a multipart/form-data request if any part of the given request body contains a File / Blob value. + * Otherwise returns the request as is. 
+ */ +export const maybeMultipartFormRequestOptions = async >( + opts: RequestOptions, +): Promise> => { + if (!hasUploadableValue(opts.body)) return opts; + + const form = await createForm(opts.body); + return getMultipartRequestOptions(form, opts); +}; + +export const multipartFormRequestOptions = async >( + opts: RequestOptions, +): Promise> => { + const form = await createForm(opts.body); + return getMultipartRequestOptions(form, opts); +}; + +export const createForm = async >(body: T | undefined): Promise => { + const form = new FormData(); + await Promise.all(Object.entries(body || {}).map(([key, value]) => addFormValue(form, key, value))); + return form; +}; + +const hasUploadableValue = (value: unknown): boolean => { + if (isUploadable(value)) return true; + if (Array.isArray(value)) return value.some(hasUploadableValue); + if (value && typeof value === 'object') { + for (const k in value) { + if (hasUploadableValue((value as any)[k])) return true; + } + } + return false; +}; + +const addFormValue = async (form: FormData, key: string, value: unknown): Promise => { + if (value === undefined) return; + if (value == null) { + throw new TypeError( + `Received null for "${key}"; to pass null in FormData, you must use the string 'null'`, + ); + } + + // TODO: make nested formats configurable + if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') { + form.append(key, String(value)); + } else if (isUploadable(value)) { + const file = await toFile(value); + form.append(key, file as File); + } else if (Array.isArray(value)) { + await Promise.all(value.map((entry) => addFormValue(form, key + '[]', entry))); + } else if (typeof value === 'object') { + await Promise.all( + Object.entries(value).map(([name, prop]) => addFormValue(form, `${key}[${name}]`, prop)), + ); + } else { + throw new TypeError( + `Invalid value given to form, expected a string, number, boolean, object, Array, File or Blob but got ${value} instead`, + ); + } +}; 
diff --git a/src/version.ts b/src/version.ts index b0bfd9e..a528f63 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '0.1.0-alpha.1'; // x-release-please-version +export const VERSION = '0.1.0-alpha.2'; // x-release-please-version diff --git a/tests/api-resources/agents/agents.test.ts b/tests/api-resources/agents/agents.test.ts index 4e8ca11..2f22dff 100644 --- a/tests/api-resources/agents/agents.test.ts +++ b/tests/api-resources/agents/agents.test.ts @@ -1,15 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource agents', () => { - // skipped: tests are disabled for the time being - test.skip('create: only required params', async () => { + test('create: only required params', async () => { const responsePromise = client.agents.create({ agent_config: { instructions: 'instructions', model: 'model' }, }); @@ -22,8 +19,7 @@ describe('resource agents', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('create: required and optional params', async () => { + test('create: required and optional params', async () => { const response = await client.agents.create({ agent_config: { instructions: 'instructions', @@ -64,8 +60,7 @@ describe('resource agents', () => { }); }); - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { + test('retrieve', async () => { const responsePromise = client.agents.retrieve('agent_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -76,8 +71,14 
@@ describe('resource agents', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('list', async () => { + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.agents.retrieve('agent_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('list', async () => { const responsePromise = client.agents.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -88,8 +89,21 @@ describe('resource agents', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('delete', async () => { + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.agents.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.agents.list({ limit: 0, start_index: 0 }, { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('delete', async () => { const responsePromise = client.agents.delete('agent_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -100,15 +114,10 @@ describe('resource agents', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - 
test.skip('listSessions', async () => { - const responsePromise = client.agents.listSessions('agent_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); + test('delete: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.agents.delete('agent_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); }); }); diff --git a/tests/api-resources/agents/session.test.ts b/tests/api-resources/agents/session.test.ts new file mode 100644 index 0000000..efcf0e7 --- /dev/null +++ b/tests/api-resources/agents/session.test.ts @@ -0,0 +1,100 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource session', () => { + test('create: only required params', async () => { + const responsePromise = client.agents.session.create('agent_id', { session_name: 'session_name' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.agents.session.create('agent_id', { session_name: 'session_name' }); + }); + + test('retrieve', async () => { + const responsePromise = client.agents.session.retrieve('agent_id', 'session_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.agents.session.retrieve('agent_id', 'session_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('retrieve: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.agents.session.retrieve( + 'agent_id', + 'session_id', + { turn_ids: ['string'] }, + { path: '/_stainless_unknown_path' }, + ), + 
).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list', async () => { + const responsePromise = client.agents.session.list('agent_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.agents.session.list('agent_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.agents.session.list( + 'agent_id', + { limit: 0, start_index: 0 }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('delete', async () => { + const responsePromise = client.agents.session.delete('agent_id', 'session_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('delete: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + 
client.agents.session.delete('agent_id', 'session_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); +}); diff --git a/tests/api-resources/agents/session/session.test.ts b/tests/api-resources/agents/session/session.test.ts deleted file mode 100644 index 627fc54..0000000 --- a/tests/api-resources/agents/session/session.test.ts +++ /dev/null @@ -1,64 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); - -describe('resource session', () => { - // skipped: tests are disabled for the time being - test.skip('create: only required params', async () => { - const responsePromise = client.agents.session.create('agent_id', { session_name: 'session_name' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('create: required and optional params', async () => { - const response = await client.agents.session.create('agent_id', { session_name: 'session_name' }); - }); - - // skipped: tests are disabled for the time being - test.skip('retrieve: only required params', async () => { - const responsePromise = client.agents.session.retrieve('session_id', { agent_id: 'agent_id' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await 
responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('retrieve: required and optional params', async () => { - const response = await client.agents.session.retrieve('session_id', { - agent_id: 'agent_id', - turn_ids: ['string'], - }); - }); - - // skipped: tests are disabled for the time being - test.skip('delete: only required params', async () => { - const responsePromise = client.agents.session.delete('session_id', { agent_id: 'agent_id' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('delete: required and optional params', async () => { - const response = await client.agents.session.delete('session_id', { agent_id: 'agent_id' }); - }); -}); diff --git a/tests/api-resources/agents/session/turn/step.test.ts b/tests/api-resources/agents/session/turn/step.test.ts deleted file mode 100644 index aa3be12..0000000 --- a/tests/api-resources/agents/session/turn/step.test.ts +++ /dev/null @@ -1,35 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', -}); - -describe('resource step', () => { - // skipped: tests are disabled for the time being - test.skip('retrieve: only required params', async () => { - const responsePromise = client.agents.session.turn.step.retrieve('step_id', { - agent_id: 'agent_id', - session_id: 'session_id', - turn_id: 'turn_id', - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('retrieve: required and optional params', async () => { - const response = await client.agents.session.turn.step.retrieve('step_id', { - agent_id: 'agent_id', - session_id: 'session_id', - turn_id: 'turn_id', - }); - }); -}); diff --git a/tests/api-resources/agents/steps.test.ts b/tests/api-resources/agents/steps.test.ts new file mode 100644 index 0000000..0696783 --- /dev/null +++ b/tests/api-resources/agents/steps.test.ts @@ -0,0 +1,28 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource steps', () => { + test('retrieve', async () => { + const responsePromise = client.agents.steps.retrieve('agent_id', 'session_id', 'turn_id', 'step_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.agents.steps.retrieve('agent_id', 'session_id', 'turn_id', 'step_id', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); +}); diff --git a/tests/api-resources/agents/session/turn/turn.test.ts b/tests/api-resources/agents/turn.test.ts similarity index 54% rename from tests/api-resources/agents/session/turn/turn.test.ts rename to tests/api-resources/agents/turn.test.ts index a484e0d..dd4e3de 100644 --- a/tests/api-resources/agents/session/turn/turn.test.ts +++ b/tests/api-resources/agents/turn.test.ts @@ -1,17 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource turn', () => { - // skipped: tests are disabled for the time being - test.skip('create: only required params', async () => { - const responsePromise = client.agents.session.turn.create('session_id', { - agent_id: 'agent_id', + test('create: only required params', async () => { + const responsePromise = client.agents.turn.create('agent_id', 'session_id', { messages: [{ content: 'string', role: 'user' }], }); const rawResponse = await responsePromise.asResponse(); @@ -23,24 +19,18 @@ describe('resource turn', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('create: required and optional params', async () => { - const response = await client.agents.session.turn.create('session_id', { - agent_id: 'agent_id', + test('create: required and optional params', async () => { + const response = await client.agents.turn.create('agent_id', 'session_id', { messages: [{ content: 'string', role: 'user', context: 'string' }], documents: [{ content: 'string', mime_type: 'mime_type' }], - stream: true, + stream: false, tool_config: { system_message_behavior: 'append', tool_choice: 'auto', tool_prompt_format: 'json' }, toolgroups: ['string'], }); }); - // skipped: tests are disabled for the time being - test.skip('retrieve: only required params', async () => { - const responsePromise = client.agents.session.turn.retrieve('turn_id', { - agent_id: 'agent_id', - session_id: 'session_id', - }); + test('retrieve', async () => { + const responsePromise = client.agents.turn.retrieve('agent_id', 'session_id', 'turn_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -50,19 +40,15 @@ describe('resource turn', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('retrieve: required and optional 
params', async () => { - const response = await client.agents.session.turn.retrieve('turn_id', { - agent_id: 'agent_id', - session_id: 'session_id', - }); + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.agents.turn.retrieve('agent_id', 'session_id', 'turn_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); }); - // skipped: tests are disabled for the time being - test.skip('resume: only required params', async () => { - const responsePromise = client.agents.session.turn.resume('turn_id', { - agent_id: 'agent_id', - session_id: 'session_id', + test('resume: only required params', async () => { + const responsePromise = client.agents.turn.resume('agent_id', 'session_id', 'turn_id', { tool_responses: [{ call_id: 'call_id', content: 'string', tool_name: 'brave_search' }], }); const rawResponse = await responsePromise.asResponse(); @@ -74,15 +60,12 @@ describe('resource turn', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('resume: required and optional params', async () => { - const response = await client.agents.session.turn.resume('turn_id', { - agent_id: 'agent_id', - session_id: 'session_id', + test('resume: required and optional params', async () => { + const response = await client.agents.turn.resume('agent_id', 'session_id', 'turn_id', { tool_responses: [ { call_id: 'call_id', content: 'string', tool_name: 'brave_search', metadata: { foo: true } }, ], - stream: true, + stream: false, }); }); }); diff --git a/tests/api-resources/benchmarks.test.ts b/tests/api-resources/benchmarks.test.ts new file mode 100644 index 0000000..45bc197 --- /dev/null +++ b/tests/api-resources/benchmarks.test.ts @@ -0,0 +1,70 @@ +// File generated from our OpenAPI spec by 
Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); + +describe('resource benchmarks', () => { + test('retrieve', async () => { + const responsePromise = client.benchmarks.retrieve('benchmark_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.benchmarks.retrieve('benchmark_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list', async () => { + const responsePromise = client.benchmarks.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.benchmarks.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('register: only required params', async () => { + const 
responsePromise = client.benchmarks.register({ + benchmark_id: 'benchmark_id', + dataset_id: 'dataset_id', + scoring_functions: ['string'], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('register: required and optional params', async () => { + const response = await client.benchmarks.register({ + benchmark_id: 'benchmark_id', + dataset_id: 'dataset_id', + scoring_functions: ['string'], + metadata: { foo: true }, + provider_benchmark_id: 'provider_benchmark_id', + provider_id: 'provider_id', + }); + }); +}); diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts new file mode 100644 index 0000000..9c6ff4a --- /dev/null +++ b/tests/api-resources/chat/completions.test.ts @@ -0,0 +1,96 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource completions', () => { + test('create: only required params', async () => { + const responsePromise = client.chat.completions.create({ + messages: [{ content: 'string', role: 'user' }], + model: 'model', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.chat.completions.create({ + messages: [{ content: 'string', role: 'user', name: 'name' }], + model: 'model', + frequency_penalty: 0, + function_call: 'string', + functions: [{ foo: true }], + logit_bias: { foo: 0 }, + logprobs: true, + max_completion_tokens: 0, + max_tokens: 0, + n: 0, + parallel_tool_calls: true, + presence_penalty: 0, + response_format: { type: 'text' }, + seed: 0, + stop: 'string', + stream: false, + stream_options: { foo: true }, + temperature: 0, + tool_choice: 'string', + tools: [{ foo: true }], + top_logprobs: 0, + top_p: 0, + user: 'user', + }); + }); + + test('retrieve', async () => { + const responsePromise = client.chat.completions.retrieve('completion_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + 
await expect( + client.chat.completions.retrieve('completion_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list', async () => { + const responsePromise = client.chat.completions.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.chat.completions.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.list( + { after: 'after', limit: 0, model: 'model', order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); +}); diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts new file mode 100644 index 0000000..736d76a --- /dev/null +++ b/tests/api-resources/completions.test.ts @@ -0,0 +1,44 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource completions', () => { + test('create: only required params', async () => { + const responsePromise = client.completions.create({ model: 'model', prompt: 'string' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.completions.create({ + model: 'model', + prompt: 'string', + best_of: 0, + echo: true, + frequency_penalty: 0, + guided_choice: ['string'], + logit_bias: { foo: 0 }, + logprobs: true, + max_tokens: 0, + n: 0, + presence_penalty: 0, + prompt_logprobs: 0, + seed: 0, + stop: 'string', + stream: false, + stream_options: { foo: true }, + suffix: 'suffix', + temperature: 0, + top_p: 0, + user: 'user', + }); + }); +}); diff --git a/tests/api-resources/datasetio.test.ts b/tests/api-resources/datasetio.test.ts deleted file mode 100644 index e41ea94..0000000 --- a/tests/api-resources/datasetio.test.ts +++ /dev/null @@ -1,51 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', -}); - -describe('resource datasetio', () => { - // skipped: tests are disabled for the time being - test.skip('appendRows: only required params', async () => { - const responsePromise = client.datasetio.appendRows('dataset_id', { rows: [{ foo: true }] }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('appendRows: required and optional params', async () => { - const response = await client.datasetio.appendRows('dataset_id', { rows: [{ foo: true }] }); - }); - - // skipped: tests are disabled for the time being - test.skip('iterateRows', async () => { - const responsePromise = client.datasetio.iterateRows('dataset_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('iterateRows: request options and params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.datasetio.iterateRows( - 'dataset_id', - { limit: 0, start_index: 0 }, - { path: '/_stainless_unknown_path' }, - ), - ).rejects.toThrow(LlamaStackClient.NotFoundError); - }); -}); diff --git a/tests/api-resources/datasets.test.ts b/tests/api-resources/datasets.test.ts index 6236c1e..e0db4c4 100644 --- 
a/tests/api-resources/datasets.test.ts +++ b/tests/api-resources/datasets.test.ts @@ -1,19 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource datasets', () => { - // skipped: tests are disabled for the time being - test.skip('create: only required params', async () => { - const responsePromise = client.datasets.create({ - purpose: 'post-training/messages', - source: { type: 'uri', uri: 'uri' }, - }); + test('retrieve', async () => { + const responsePromise = client.datasets.retrieve('dataset_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -23,19 +17,15 @@ describe('resource datasets', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('create: required and optional params', async () => { - const response = await client.datasets.create({ - purpose: 'post-training/messages', - source: { type: 'uri', uri: 'uri' }, - dataset_id: 'dataset_id', - metadata: { foo: true }, - }); + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.datasets.retrieve('dataset_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); }); - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { - const responsePromise = client.datasets.retrieve('dataset_id'); + 
test('list', async () => { + const responsePromise = client.datasets.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -45,9 +35,15 @@ describe('resource datasets', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('list', async () => { - const responsePromise = client.datasets.list(); + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.datasets.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('appendrows: only required params', async () => { + const responsePromise = client.datasets.appendrows('dataset_id', { rows: [{ foo: true }] }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -57,9 +53,12 @@ describe('resource datasets', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('delete', async () => { - const responsePromise = client.datasets.delete('dataset_id'); + test('appendrows: required and optional params', async () => { + const response = await client.datasets.appendrows('dataset_id', { rows: [{ foo: true }] }); + }); + + test('iterrows', async () => { + const responsePromise = client.datasets.iterrows('dataset_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -68,4 +67,63 @@ describe('resource datasets', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); + + test('iterrows: request options instead of params are 
passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.datasets.iterrows('dataset_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('iterrows: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.datasets.iterrows( + 'dataset_id', + { limit: 0, start_index: 0 }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('register: only required params', async () => { + const responsePromise = client.datasets.register({ + purpose: 'post-training/messages', + source: { type: 'uri', uri: 'uri' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('register: required and optional params', async () => { + const response = await client.datasets.register({ + purpose: 'post-training/messages', + source: { type: 'uri', uri: 'uri' }, + dataset_id: 'dataset_id', + metadata: { foo: true }, + }); + }); + + test('unregister', async () => { + const responsePromise = client.datasets.unregister('dataset_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + 
}); + + test('unregister: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.datasets.unregister('dataset_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); }); diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts new file mode 100644 index 0000000..c71aacb --- /dev/null +++ b/tests/api-resources/embeddings.test.ts @@ -0,0 +1,29 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); + +describe('resource embeddings', () => { + test('create: only required params', async () => { + const responsePromise = client.embeddings.create({ input: 'string', model: 'model' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.embeddings.create({ + input: 'string', + model: 'model', + dimensions: 0, + encoding_format: 'encoding_format', + user: 'user', + }); + }); +}); diff --git a/tests/api-resources/eval/benchmarks/benchmarks.test.ts b/tests/api-resources/eval/benchmarks/benchmarks.test.ts deleted file mode 100644 index 50a13bd..0000000 --- a/tests/api-resources/eval/benchmarks/benchmarks.test.ts +++ /dev/null @@ -1,119 +0,0 @@ -// File generated from our OpenAPI spec by 
Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); - -describe('resource benchmarks', () => { - // skipped: tests are disabled for the time being - test.skip('create: only required params', async () => { - const responsePromise = client.eval.benchmarks.create({ - benchmark_id: 'benchmark_id', - dataset_id: 'dataset_id', - scoring_functions: ['string'], - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('create: required and optional params', async () => { - const response = await client.eval.benchmarks.create({ - benchmark_id: 'benchmark_id', - dataset_id: 'dataset_id', - scoring_functions: ['string'], - metadata: { foo: true }, - provider_benchmark_id: 'provider_benchmark_id', - provider_id: 'provider_id', - }); - }); - - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { - const responsePromise = client.eval.benchmarks.retrieve('benchmark_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('list', async () => { - const responsePromise = client.eval.benchmarks.list(); - const 
rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('evaluate: only required params', async () => { - const responsePromise = client.eval.benchmarks.evaluate('benchmark_id', { - benchmark_config: { - eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' }, - scoring_params: { - foo: { - aggregation_functions: ['average'], - judge_model: 'judge_model', - judge_score_regexes: ['string'], - type: 'llm_as_judge', - }, - }, - }, - input_rows: [{ foo: true }], - scoring_functions: ['string'], - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('evaluate: required and optional params', async () => { - const response = await client.eval.benchmarks.evaluate('benchmark_id', { - benchmark_config: { - eval_candidate: { - model: 'model', - sampling_params: { - strategy: { type: 'greedy' }, - max_tokens: 0, - repetition_penalty: 0, - stop: ['string'], - }, - type: 'model', - system_message: { content: 'string', role: 'system' }, - }, - scoring_params: { - foo: { - aggregation_functions: ['average'], - judge_model: 'judge_model', - judge_score_regexes: ['string'], - type: 'llm_as_judge', - prompt_template: 'prompt_template', - }, - }, - num_examples: 0, - }, - input_rows: [{ foo: true }], 
- scoring_functions: ['string'], - }); - }); -}); diff --git a/tests/api-resources/eval/benchmarks/jobs.test.ts b/tests/api-resources/eval/benchmarks/jobs.test.ts deleted file mode 100644 index 8feee4c..0000000 --- a/tests/api-resources/eval/benchmarks/jobs.test.ts +++ /dev/null @@ -1,114 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); - -describe('resource jobs', () => { - // skipped: tests are disabled for the time being - test.skip('retrieve: only required params', async () => { - const responsePromise = client.eval.benchmarks.jobs.retrieve('job_id', { benchmark_id: 'benchmark_id' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('retrieve: required and optional params', async () => { - const response = await client.eval.benchmarks.jobs.retrieve('job_id', { benchmark_id: 'benchmark_id' }); - }); - - // skipped: tests are disabled for the time being - test.skip('cancel: only required params', async () => { - const responsePromise = client.eval.benchmarks.jobs.cancel('job_id', { benchmark_id: 'benchmark_id' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - 
expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('cancel: required and optional params', async () => { - const response = await client.eval.benchmarks.jobs.cancel('job_id', { benchmark_id: 'benchmark_id' }); - }); - - // skipped: tests are disabled for the time being - test.skip('result: only required params', async () => { - const responsePromise = client.eval.benchmarks.jobs.result('job_id', { benchmark_id: 'benchmark_id' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('result: required and optional params', async () => { - const response = await client.eval.benchmarks.jobs.result('job_id', { benchmark_id: 'benchmark_id' }); - }); - - // skipped: tests are disabled for the time being - test.skip('run: only required params', async () => { - const responsePromise = client.eval.benchmarks.jobs.run('benchmark_id', { - benchmark_config: { - eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' }, - scoring_params: { - foo: { - aggregation_functions: ['average'], - judge_model: 'judge_model', - judge_score_regexes: ['string'], - type: 'llm_as_judge', - }, - }, - }, - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled 
for the time being - test.skip('run: required and optional params', async () => { - const response = await client.eval.benchmarks.jobs.run('benchmark_id', { - benchmark_config: { - eval_candidate: { - model: 'model', - sampling_params: { - strategy: { type: 'greedy' }, - max_tokens: 0, - repetition_penalty: 0, - stop: ['string'], - }, - type: 'model', - system_message: { content: 'string', role: 'system' }, - }, - scoring_params: { - foo: { - aggregation_functions: ['average'], - judge_model: 'judge_model', - judge_score_regexes: ['string'], - type: 'llm_as_judge', - prompt_template: 'prompt_template', - }, - }, - num_examples: 0, - }, - }); - }); -}); diff --git a/tests/api-resources/eval/eval.test.ts b/tests/api-resources/eval/eval.test.ts new file mode 100644 index 0000000..9f3e461 --- /dev/null +++ b/tests/api-resources/eval/eval.test.ts @@ -0,0 +1,220 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource eval', () => { + test('evaluateRows: only required params', async () => { + const responsePromise = client.eval.evaluateRows('benchmark_id', { + benchmark_config: { + eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' }, + scoring_params: { + foo: { + aggregation_functions: ['average'], + judge_model: 'judge_model', + judge_score_regexes: ['string'], + type: 'llm_as_judge', + }, + }, + }, + input_rows: [{ foo: true }], + scoring_functions: ['string'], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('evaluateRows: required and optional params', async () => { + const response = await client.eval.evaluateRows('benchmark_id', { + benchmark_config: { + eval_candidate: { + model: 'model', + sampling_params: { + strategy: { type: 'greedy' }, + max_tokens: 0, + repetition_penalty: 0, + stop: ['string'], + }, + type: 'model', + system_message: { content: 'string', role: 'system' }, + }, + scoring_params: { + foo: { + aggregation_functions: ['average'], + judge_model: 'judge_model', + judge_score_regexes: ['string'], + type: 'llm_as_judge', + prompt_template: 'prompt_template', + }, + }, + num_examples: 0, + }, + input_rows: [{ foo: true }], + scoring_functions: ['string'], + }); + }); + + test('evaluateRowsAlpha: only required params', async () => { + const responsePromise = client.eval.evaluateRowsAlpha('benchmark_id', { + benchmark_config: { + eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' }, + scoring_params: { + foo: { + aggregation_functions: ['average'], + judge_model: 'judge_model', + 
judge_score_regexes: ['string'], + type: 'llm_as_judge', + }, + }, + }, + input_rows: [{ foo: true }], + scoring_functions: ['string'], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('evaluateRowsAlpha: required and optional params', async () => { + const response = await client.eval.evaluateRowsAlpha('benchmark_id', { + benchmark_config: { + eval_candidate: { + model: 'model', + sampling_params: { + strategy: { type: 'greedy' }, + max_tokens: 0, + repetition_penalty: 0, + stop: ['string'], + }, + type: 'model', + system_message: { content: 'string', role: 'system' }, + }, + scoring_params: { + foo: { + aggregation_functions: ['average'], + judge_model: 'judge_model', + judge_score_regexes: ['string'], + type: 'llm_as_judge', + prompt_template: 'prompt_template', + }, + }, + num_examples: 0, + }, + input_rows: [{ foo: true }], + scoring_functions: ['string'], + }); + }); + + test('runEval: only required params', async () => { + const responsePromise = client.eval.runEval('benchmark_id', { + benchmark_config: { + eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' }, + scoring_params: { + foo: { + aggregation_functions: ['average'], + judge_model: 'judge_model', + judge_score_regexes: ['string'], + type: 'llm_as_judge', + }, + }, + }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + 
expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('runEval: required and optional params', async () => { + const response = await client.eval.runEval('benchmark_id', { + benchmark_config: { + eval_candidate: { + model: 'model', + sampling_params: { + strategy: { type: 'greedy' }, + max_tokens: 0, + repetition_penalty: 0, + stop: ['string'], + }, + type: 'model', + system_message: { content: 'string', role: 'system' }, + }, + scoring_params: { + foo: { + aggregation_functions: ['average'], + judge_model: 'judge_model', + judge_score_regexes: ['string'], + type: 'llm_as_judge', + prompt_template: 'prompt_template', + }, + }, + num_examples: 0, + }, + }); + }); + + test('runEvalAlpha: only required params', async () => { + const responsePromise = client.eval.runEvalAlpha('benchmark_id', { + benchmark_config: { + eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' }, + scoring_params: { + foo: { + aggregation_functions: ['average'], + judge_model: 'judge_model', + judge_score_regexes: ['string'], + type: 'llm_as_judge', + }, + }, + }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('runEvalAlpha: required and optional params', async () => { + const response = await client.eval.runEvalAlpha('benchmark_id', { + benchmark_config: { + eval_candidate: { + model: 'model', + sampling_params: { + strategy: { type: 'greedy' }, + max_tokens: 0, + repetition_penalty: 0, + stop: ['string'], + }, + type: 'model', + system_message: { content: 'string', role: 'system' }, + }, + scoring_params: { + foo: { + aggregation_functions: ['average'], + judge_model: 'judge_model', + 
judge_score_regexes: ['string'], + type: 'llm_as_judge', + prompt_template: 'prompt_template', + }, + }, + num_examples: 0, + }, + }); + }); +}); diff --git a/tests/api-resources/eval/jobs.test.ts b/tests/api-resources/eval/jobs.test.ts new file mode 100644 index 0000000..cad4ebd --- /dev/null +++ b/tests/api-resources/eval/jobs.test.ts @@ -0,0 +1,62 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); + +describe('resource jobs', () => { + test('retrieve', async () => { + const responsePromise = client.eval.jobs.retrieve('benchmark_id', 'job_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.eval.jobs.retrieve('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('cancel', async () => { + const responsePromise = client.eval.jobs.cancel('benchmark_id', 'job_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + 
expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('cancel: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.eval.jobs.cancel('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('status', async () => { + const responsePromise = client.eval.jobs.status('benchmark_id', 'job_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('status: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.eval.jobs.status('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); +}); diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts new file mode 100644 index 0000000..6482b2e --- /dev/null +++ b/tests/api-resources/files.test.ts @@ -0,0 +1,111 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient, { toFile } from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource files', () => { + test('create: only required params', async () => { + const responsePromise = client.files.create({ + file: await toFile(Buffer.from('# my file contents'), 'README.md'), + purpose: 'assistants', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.files.create({ + file: await toFile(Buffer.from('# my file contents'), 'README.md'), + purpose: 'assistants', + }); + }); + + test('retrieve', async () => { + const responsePromise = client.files.retrieve('file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.files.retrieve('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('list', async () => { + const responsePromise = client.files.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await 
responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.files.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.files.list( + { after: 'after', limit: 0, order: 'asc', purpose: 'assistants' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('delete', async () => { + const responsePromise = client.files.delete('file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('delete: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.files.delete('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('content', async () => { + const responsePromise = client.files.content('file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + 
const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('content: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.files.content('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); +}); diff --git a/tests/api-resources/files/files.test.ts b/tests/api-resources/files/files.test.ts deleted file mode 100644 index 0964d86..0000000 --- a/tests/api-resources/files/files.test.ts +++ /dev/null @@ -1,100 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', -}); - -describe('resource files', () => { - // skipped: tests are disabled for the time being - test.skip('retrieve: only required params', async () => { - const responsePromise = client.files.retrieve('key', { bucket: 'bucket' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('retrieve: required and optional params', async () => { - const response = await client.files.retrieve('key', { bucket: 'bucket' }); - }); - - // skipped: tests are disabled for the time being - test.skip('list: only required params', async () => { - const responsePromise = client.files.list({ bucket: 'bucket' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('list: required and optional params', async () => { - const response = await client.files.list({ bucket: 'bucket' }); - }); - - // skipped: tests are disabled for the time being - test.skip('delete: only required params', async () => { - const responsePromise = client.files.delete('key', { bucket: 'bucket' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await 
responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('delete: required and optional params', async () => { - const response = await client.files.delete('key', { bucket: 'bucket' }); - }); - - // skipped: tests are disabled for the time being - test.skip('createUploadSession: only required params', async () => { - const responsePromise = client.files.createUploadSession({ - bucket: 'bucket', - key: 'key', - mime_type: 'mime_type', - size: 0, - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('createUploadSession: required and optional params', async () => { - const response = await client.files.createUploadSession({ - bucket: 'bucket', - key: 'key', - mime_type: 'mime_type', - size: 0, - }); - }); - - // skipped: tests are disabled for the time being - test.skip('listInBucket', async () => { - const responsePromise = client.files.listInBucket('bucket'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); -}); diff --git a/tests/api-resources/files/session.test.ts b/tests/api-resources/files/session.test.ts deleted file mode 100644 index bac0019..0000000 --- a/tests/api-resources/files/session.test.ts +++ /dev/null @@ -1,43 
+0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient, { toFile } from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); - -describe('resource session', () => { - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { - const responsePromise = client.files.session.retrieve('upload_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('uploadContent: only required params', async () => { - const responsePromise = client.files.session.uploadContent('upload_id', { - body: await toFile(Buffer.from('# my file contents'), 'README.md'), - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('uploadContent: required and optional params', async () => { - const response = await client.files.session.uploadContent('upload_id', { - body: await toFile(Buffer.from('# my file contents'), 'README.md'), - }); - }); -}); diff --git a/tests/api-resources/health.test.ts b/tests/api-resources/health.test.ts deleted file mode 100644 index f60b4f1..0000000 --- a/tests/api-resources/health.test.ts +++ /dev/null @@ -1,22 
+0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); - -describe('resource health', () => { - // skipped: tests are disabled for the time being - test.skip('check', async () => { - const responsePromise = client.health.check(); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); -}); diff --git a/tests/api-resources/inference.test.ts b/tests/api-resources/inference.test.ts index ecd29a2..f481330 100644 --- a/tests/api-resources/inference.test.ts +++ b/tests/api-resources/inference.test.ts @@ -1,15 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource inference', () => { - // skipped: tests are disabled for the time being - test.skip('batchChatCompletion: only required params', async () => { + test('batchChatCompletion: only required params', async () => { const responsePromise = client.inference.batchChatCompletion({ messages_batch: [[{ content: 'string', role: 'user' }]], model_id: 'model_id', @@ -23,8 +20,7 @@ describe('resource inference', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('batchChatCompletion: required and optional params', async () => { + test('batchChatCompletion: required and optional params', async () => { const response = await client.inference.batchChatCompletion({ messages_batch: [[{ content: 'string', role: 'user', context: 'string' }]], model_id: 'model_id', @@ -49,8 +45,7 @@ describe('resource inference', () => { }); }); - // skipped: tests are disabled for the time being - test.skip('batchCompletion: only required params', async () => { + test('batchCompletion: only required params', async () => { const responsePromise = client.inference.batchCompletion({ content_batch: ['string'], model_id: 'model_id', @@ -64,8 +59,7 @@ describe('resource inference', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('batchCompletion: required and optional params', async () => { + test('batchCompletion: required and optional params', async () => { const response = await client.inference.batchCompletion({ content_batch: ['string'], model_id: 'model_id', @@ -80,8 +74,7 @@ describe('resource inference', () => { }); }); - // skipped: tests are disabled for the time being - test.skip('chatCompletion: only required params', async () => { + test('chatCompletion: only required params', async () => { const responsePromise = client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], 
model_id: 'model_id', @@ -95,8 +88,7 @@ describe('resource inference', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('chatCompletion: required and optional params', async () => { + test('chatCompletion: required and optional params', async () => { const response = await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user', context: 'string' }], model_id: 'model_id', @@ -108,7 +100,7 @@ describe('resource inference', () => { repetition_penalty: 0, stop: ['string'], }, - stream: true, + stream: false, tool_choice: 'auto', tool_config: { system_message_behavior: 'append', tool_choice: 'auto', tool_prompt_format: 'json' }, tool_prompt_format: 'json', @@ -124,8 +116,7 @@ describe('resource inference', () => { }); }); - // skipped: tests are disabled for the time being - test.skip('completion: only required params', async () => { + test('completion: only required params', async () => { const responsePromise = client.inference.completion({ content: 'string', model_id: 'model_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -136,8 +127,7 @@ describe('resource inference', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('completion: required and optional params', async () => { + test('completion: required and optional params', async () => { const response = await client.inference.completion({ content: 'string', model_id: 'model_id', @@ -149,12 +139,11 @@ describe('resource inference', () => { repetition_penalty: 0, stop: ['string'], }, - stream: true, + stream: false, }); }); - // skipped: tests are disabled for the time being - test.skip('embeddings: only required params', async () => { + test('embeddings: only required params', async () => { const responsePromise = client.inference.embeddings({ contents: ['string'], model_id: 
'model_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -165,8 +154,7 @@ describe('resource inference', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('embeddings: required and optional params', async () => { + test('embeddings: required and optional params', async () => { const response = await client.inference.embeddings({ contents: ['string'], model_id: 'model_id', diff --git a/tests/api-resources/inspect.test.ts b/tests/api-resources/inspect.test.ts index e5d298c..291e4a8 100644 --- a/tests/api-resources/inspect.test.ts +++ b/tests/api-resources/inspect.test.ts @@ -1,16 +1,31 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource inspect', () => { - // skipped: tests are disabled for the time being - test.skip('listRoutes', async () => { - const responsePromise = client.inspect.listRoutes(); + test('health', async () => { + const responsePromise = client.inspect.health(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('health: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.inspect.health({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('version', async () => { + const responsePromise = client.inspect.version(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -19,4 +34,11 @@ describe('resource inspect', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); + + test('version: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.inspect.version({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); }); diff --git a/tests/api-resources/models.test.ts b/tests/api-resources/models.test.ts index 7fb3bcd..6c47389 100644 --- a/tests/api-resources/models.test.ts +++ b/tests/api-resources/models.test.ts @@ -1,16 +1,13 @@ // File generated from our OpenAPI 
spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource models', () => { - // skipped: tests are disabled for the time being - test.skip('create: only required params', async () => { - const responsePromise = client.models.create({ model_id: 'model_id' }); + test('retrieve', async () => { + const responsePromise = client.models.retrieve('model_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -20,20 +17,15 @@ describe('resource models', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('create: required and optional params', async () => { - const response = await client.models.create({ - model_id: 'model_id', - metadata: { foo: true }, - model_type: 'llm', - provider_id: 'provider_id', - provider_model_id: 'provider_model_id', - }); + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.models.retrieve('model_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); }); - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { - const responsePromise = client.models.retrieve('model_id'); + test('list', async () => { + const responsePromise = client.models.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await 
responsePromise; @@ -43,9 +35,15 @@ describe('resource models', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('list', async () => { - const responsePromise = client.models.list(); + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.models.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('register: only required params', async () => { + const responsePromise = client.models.register({ model_id: 'model_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -55,9 +53,18 @@ describe('resource models', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('delete', async () => { - const responsePromise = client.models.delete('model_id'); + test('register: required and optional params', async () => { + const response = await client.models.register({ + model_id: 'model_id', + metadata: { foo: true }, + model_type: 'llm', + provider_id: 'provider_id', + provider_model_id: 'provider_model_id', + }); + }); + + test('unregister', async () => { + const responsePromise = client.models.unregister('model_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -66,4 +73,11 @@ describe('resource models', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); + + test('unregister: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order 
to cause an error + await expect(client.models.unregister('model_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); }); diff --git a/tests/api-resources/openai/v1/chat.test.ts b/tests/api-resources/openai/v1/chat.test.ts deleted file mode 100644 index 88d6c7a..0000000 --- a/tests/api-resources/openai/v1/chat.test.ts +++ /dev/null @@ -1,54 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); - -describe('resource chat', () => { - // skipped: tests are disabled for the time being - test.skip('generateCompletion: only required params', async () => { - const responsePromise = client.openai.v1.chat.generateCompletion({ - messages: [{ content: 'string', role: 'user' }], - model: 'model', - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('generateCompletion: required and optional params', async () => { - const response = await client.openai.v1.chat.generateCompletion({ - messages: [{ content: 'string', role: 'user', name: 'name' }], - model: 'model', - frequency_penalty: 0, - function_call: 'string', - functions: [{ foo: true }], - logit_bias: { foo: 0 }, - logprobs: true, - max_completion_tokens: 0, - max_tokens: 0, - n: 0, - parallel_tool_calls: true, - presence_penalty: 0, - response_format: { type: 'text' }, - seed: 0, - stop: 'string', - stream: true, - stream_options: { foo: true }, - 
temperature: 0, - tool_choice: 'string', - tools: [{ foo: true }], - top_logprobs: 0, - top_p: 0, - user: 'user', - }); - }); -}); diff --git a/tests/api-resources/openai/v1/responses.test.ts b/tests/api-resources/openai/v1/responses.test.ts deleted file mode 100644 index e9859b5..0000000 --- a/tests/api-resources/openai/v1/responses.test.ts +++ /dev/null @@ -1,47 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); - -describe('resource responses', () => { - // skipped: tests are disabled for the time being - test.skip('create: only required params', async () => { - const responsePromise = client.openai.v1.responses.create({ input: 'string', model: 'model' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('create: required and optional params', async () => { - const response = await client.openai.v1.responses.create({ - input: 'string', - model: 'model', - previous_response_id: 'previous_response_id', - store: true, - stream: true, - temperature: 0, - tools: [{ type: 'web_search', search_context_size: 'search_context_size' }], - }); - }); - - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { - const responsePromise = client.openai.v1.responses.retrieve('id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - 
expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); -}); diff --git a/tests/api-resources/openai/v1/v1.test.ts b/tests/api-resources/openai/v1/v1.test.ts deleted file mode 100644 index 5db6621..0000000 --- a/tests/api-resources/openai/v1/v1.test.ts +++ /dev/null @@ -1,59 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); - -describe('resource v1', () => { - // skipped: tests are disabled for the time being - test.skip('generateCompletion: only required params', async () => { - const responsePromise = client.openai.v1.generateCompletion({ model: 'model', prompt: 'string' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('generateCompletion: required and optional params', async () => { - const response = await client.openai.v1.generateCompletion({ - model: 'model', - prompt: 'string', - best_of: 0, - echo: true, - frequency_penalty: 0, - guided_choice: ['string'], - logit_bias: { foo: 0 }, - logprobs: true, - max_tokens: 0, - n: 0, - presence_penalty: 0, - prompt_logprobs: 0, - seed: 0, - stop: 'string', - stream: true, - stream_options: { foo: true }, - temperature: 0, - top_p: 0, - user: 'user', - }); - }); - - // skipped: tests are disabled for the time being - test.skip('listModels', 
async () => { - const responsePromise = client.openai.v1.listModels(); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); -}); diff --git a/tests/api-resources/post-training/job.test.ts b/tests/api-resources/post-training/job.test.ts index d7c29c0..0cb1ebb 100644 --- a/tests/api-resources/post-training/job.test.ts +++ b/tests/api-resources/post-training/job.test.ts @@ -1,16 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource job', () => { - // skipped: tests are disabled for the time being - test.skip('cancel: only required params', async () => { - const responsePromise = client.postTraining.job.cancel({ job_uuid: 'job_uuid' }); + test('list', async () => { + const responsePromise = client.postTraining.job.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -20,14 +17,30 @@ describe('resource job', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('cancel: required and optional params', async () => { - const response = await client.postTraining.job.cancel({ job_uuid: 'job_uuid' }); + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.postTraining.job.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('artifacts: only required params', async () => { + const responsePromise = client.postTraining.job.artifacts({ job_uuid: 'job_uuid' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('retrieveArtifacts: only required params', async () => { - const responsePromise = client.postTraining.job.retrieveArtifacts({ job_uuid: 'job_uuid' }); + test('artifacts: required and optional params', async () => { + const response = await client.postTraining.job.artifacts({ 
job_uuid: 'job_uuid' }); + }); + + test('cancel: only required params', async () => { + const responsePromise = client.postTraining.job.cancel({ job_uuid: 'job_uuid' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -37,14 +50,12 @@ describe('resource job', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('retrieveArtifacts: required and optional params', async () => { - const response = await client.postTraining.job.retrieveArtifacts({ job_uuid: 'job_uuid' }); + test('cancel: required and optional params', async () => { + const response = await client.postTraining.job.cancel({ job_uuid: 'job_uuid' }); }); - // skipped: tests are disabled for the time being - test.skip('retrieveStatus: only required params', async () => { - const responsePromise = client.postTraining.job.retrieveStatus({ job_uuid: 'job_uuid' }); + test('status: only required params', async () => { + const responsePromise = client.postTraining.job.status({ job_uuid: 'job_uuid' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -54,8 +65,7 @@ describe('resource job', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('retrieveStatus: required and optional params', async () => { - const response = await client.postTraining.job.retrieveStatus({ job_uuid: 'job_uuid' }); + test('status: required and optional params', async () => { + const response = await client.postTraining.job.status({ job_uuid: 'job_uuid' }); }); }); diff --git a/tests/api-resources/post-training/post-training.test.ts b/tests/api-resources/post-training/post-training.test.ts index 1215b79..d8967bb 100644 --- a/tests/api-resources/post-training/post-training.test.ts +++ 
b/tests/api-resources/post-training/post-training.test.ts @@ -1,16 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource postTraining', () => { - // skipped: tests are disabled for the time being - test.skip('fineTuneSupervised: only required params', async () => { - const responsePromise = client.postTraining.fineTuneSupervised({ + test('preferenceOptimize: only required params', async () => { + const responsePromise = client.postTraining.preferenceOptimize({ + algorithm_config: { epsilon: 0, gamma: 0, reward_clip: 0, reward_scale: 0 }, + finetuned_model: 'finetuned_model', hyperparam_search_config: { foo: true }, job_uuid: 'job_uuid', logger_config: { foo: true }, @@ -25,9 +24,10 @@ describe('resource postTraining', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('fineTuneSupervised: required and optional params', async () => { - const response = await client.postTraining.fineTuneSupervised({ + test('preferenceOptimize: required and optional params', async () => { + const response = await client.postTraining.preferenceOptimize({ + algorithm_config: { epsilon: 0, gamma: 0, reward_clip: 0, reward_scale: 0 }, + finetuned_model: 'finetuned_model', hyperparam_search_config: { foo: true }, job_uuid: 'job_uuid', logger_config: { foo: true }, @@ -54,38 +54,11 @@ describe('resource postTraining', () => { max_validation_steps: 0, optimizer_config: { lr: 0, num_warmup_steps: 0, optimizer_type: 'adam', weight_decay: 0 }, }, - algorithm_config: { - alpha: 0, - apply_lora_to_mlp: true, - 
apply_lora_to_output: true, - lora_attn_modules: ['string'], - rank: 0, - type: 'LoRA', - quantize_base: true, - use_dora: true, - }, - checkpoint_dir: 'checkpoint_dir', - model: 'model', }); }); - // skipped: tests are disabled for the time being - test.skip('listJobs', async () => { - const responsePromise = client.postTraining.listJobs(); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('optimizePreferences: only required params', async () => { - const responsePromise = client.postTraining.optimizePreferences({ - algorithm_config: { epsilon: 0, gamma: 0, reward_clip: 0, reward_scale: 0 }, - finetuned_model: 'finetuned_model', + test('supervisedFineTune: only required params', async () => { + const responsePromise = client.postTraining.supervisedFineTune({ hyperparam_search_config: { foo: true }, job_uuid: 'job_uuid', logger_config: { foo: true }, @@ -100,11 +73,8 @@ describe('resource postTraining', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('optimizePreferences: required and optional params', async () => { - const response = await client.postTraining.optimizePreferences({ - algorithm_config: { epsilon: 0, gamma: 0, reward_clip: 0, reward_scale: 0 }, - finetuned_model: 'finetuned_model', + test('supervisedFineTune: required and optional params', async () => { + const response = await client.postTraining.supervisedFineTune({ hyperparam_search_config: { foo: true }, job_uuid: 'job_uuid', logger_config: { foo: true }, @@ -131,6 +101,18 @@ describe('resource postTraining', () => { max_validation_steps: 0, 
optimizer_config: { lr: 0, num_warmup_steps: 0, optimizer_type: 'adam', weight_decay: 0 }, }, + algorithm_config: { + alpha: 0, + apply_lora_to_mlp: true, + apply_lora_to_output: true, + lora_attn_modules: ['string'], + rank: 0, + type: 'LoRA', + quantize_base: true, + use_dora: true, + }, + checkpoint_dir: 'checkpoint_dir', + model: 'model', }); }); }); diff --git a/tests/api-resources/providers.test.ts b/tests/api-resources/providers.test.ts index 8666045..fb34e96 100644 --- a/tests/api-resources/providers.test.ts +++ b/tests/api-resources/providers.test.ts @@ -1,15 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource providers', () => { - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { + test('retrieve', async () => { const responsePromise = client.providers.retrieve('provider_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -20,8 +17,14 @@ describe('resource providers', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('list', async () => { + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.providers.retrieve('provider_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list', async () => { const responsePromise = client.providers.list(); const 
rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -31,4 +34,11 @@ describe('resource providers', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.providers.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); }); diff --git a/tests/api-resources/responses/input-items.test.ts b/tests/api-resources/responses/input-items.test.ts new file mode 100644 index 0000000..9f59ffe --- /dev/null +++ b/tests/api-resources/responses/input-items.test.ts @@ -0,0 +1,37 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource inputItems', () => { + test('list', async () => { + const responsePromise = client.responses.inputItems.list('response_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.inputItems.list('response_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.inputItems.list( + 'response_id', + { after: 'after', before: 'before', include: ['string'], limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); +}); diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts new file mode 100644 index 0000000..79575ae --- /dev/null +++ b/tests/api-resources/responses/responses.test.ts @@ -0,0 +1,88 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource responses', () => { + test('create: only required params', async () => { + const responsePromise = client.responses.create({ input: 'string', model: 'model' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.responses.create({ + input: 'string', + model: 'model', + instructions: 'instructions', + max_infer_iters: 0, + previous_response_id: 'previous_response_id', + store: true, + stream: false, + temperature: 0, + text: { + format: { + type: 'text', + description: 'description', + name: 'name', + schema: { foo: true }, + strict: true, + }, + }, + tools: [{ type: 'web_search', search_context_size: 'search_context_size' }], + }); + }); + + test('retrieve', async () => { + const responsePromise = client.responses.retrieve('response_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.retrieve('response_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list', async () => { + const 
responsePromise = client.responses.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.responses.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.list( + { after: 'after', limit: 0, model: 'model', order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); +}); diff --git a/tests/api-resources/routes.test.ts b/tests/api-resources/routes.test.ts new file mode 100644 index 0000000..4373ab6 --- /dev/null +++ b/tests/api-resources/routes.test.ts @@ -0,0 +1,26 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource routes', () => { + test('list', async () => { + const responsePromise = client.routes.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.routes.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); +}); diff --git a/tests/api-resources/safety.test.ts b/tests/api-resources/safety.test.ts index c7740db..4ca2ca6 100644 --- a/tests/api-resources/safety.test.ts +++ b/tests/api-resources/safety.test.ts @@ -1,15 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource safety', () => { - // skipped: tests are disabled for the time being - test.skip('runShield: only required params', async () => { + test('runShield: only required params', async () => { const responsePromise = client.safety.runShield({ messages: [{ content: 'string', role: 'user' }], params: { foo: true }, @@ -24,8 +21,7 @@ describe('resource safety', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('runShield: required and optional params', async () => { + test('runShield: required and optional params', async () => { const response = await client.safety.runShield({ messages: [{ content: 'string', role: 'user', context: 'string' }], params: { foo: true }, diff --git a/tests/api-resources/scoring-functions.test.ts b/tests/api-resources/scoring-functions.test.ts index 836be2b..9d2a4fb 100644 --- a/tests/api-resources/scoring-functions.test.ts +++ b/tests/api-resources/scoring-functions.test.ts @@ -1,20 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource scoringFunctions', () => { - // skipped: tests are disabled for the time being - test.skip('create: only required params', async () => { - const responsePromise = client.scoringFunctions.create({ - description: 'description', - return_type: { type: 'string' }, - scoring_fn_id: 'scoring_fn_id', - }); + test('retrieve', async () => { + const responsePromise = client.scoringFunctions.retrieve('scoring_fn_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -24,27 +17,15 @@ describe('resource scoringFunctions', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('create: required and optional params', async () => { - const response = await client.scoringFunctions.create({ - description: 'description', - return_type: { type: 'string' }, - scoring_fn_id: 'scoring_fn_id', - params: { - aggregation_functions: ['average'], - judge_model: 'judge_model', - judge_score_regexes: ['string'], - type: 'llm_as_judge', - prompt_template: 'prompt_template', - }, - provider_id: 'provider_id', - provider_scoring_fn_id: 'provider_scoring_fn_id', - }); + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.scoringFunctions.retrieve('scoring_fn_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); }); - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { - const responsePromise = client.scoringFunctions.retrieve('scoring_fn_id'); + test('list', async () => { + const responsePromise = client.scoringFunctions.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); 
const response = await responsePromise; @@ -54,9 +35,19 @@ describe('resource scoringFunctions', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('list', async () => { - const responsePromise = client.scoringFunctions.list(); + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.scoringFunctions.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('register: only required params', async () => { + const responsePromise = client.scoringFunctions.register({ + description: 'description', + return_type: { type: 'string' }, + scoring_fn_id: 'scoring_fn_id', + }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -65,4 +56,21 @@ describe('resource scoringFunctions', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); + + test('register: required and optional params', async () => { + const response = await client.scoringFunctions.register({ + description: 'description', + return_type: { type: 'string' }, + scoring_fn_id: 'scoring_fn_id', + params: { + aggregation_functions: ['average'], + judge_model: 'judge_model', + judge_score_regexes: ['string'], + type: 'llm_as_judge', + prompt_template: 'prompt_template', + }, + provider_id: 'provider_id', + provider_scoring_fn_id: 'provider_scoring_fn_id', + }); + }); }); diff --git a/tests/api-resources/scoring.test.ts b/tests/api-resources/scoring.test.ts index 115ee26..5486176 100644 --- a/tests/api-resources/scoring.test.ts +++ b/tests/api-resources/scoring.test.ts @@ -1,15 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource scoring', () => { - // skipped: tests are disabled for the time being - test.skip('score: only required params', async () => { + test('score: only required params', async () => { const responsePromise = client.scoring.score({ input_rows: [{ foo: true }], scoring_functions: { @@ -30,8 +27,7 @@ describe('resource scoring', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('score: required and optional params', async () => { + test('score: required and optional params', async () => { const response = await client.scoring.score({ input_rows: [{ foo: true }], scoring_functions: { @@ -46,8 +42,7 @@ describe('resource scoring', () => { }); }); - // skipped: tests are disabled for the time being - test.skip('scoreBatch: only required params', async () => { + test('scoreBatch: only required params', async () => { const responsePromise = client.scoring.scoreBatch({ dataset_id: 'dataset_id', save_results_dataset: true, @@ -69,8 +64,7 @@ describe('resource scoring', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('scoreBatch: required and optional params', async () => { + test('scoreBatch: required and optional params', async () => { const response = await client.scoring.scoreBatch({ dataset_id: 'dataset_id', save_results_dataset: true, diff --git a/tests/api-resources/shields.test.ts b/tests/api-resources/shields.test.ts index 054e092..af58610 100644 --- a/tests/api-resources/shields.test.ts +++ b/tests/api-resources/shields.test.ts @@ -1,16 +1,13 @@ // File 
generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource shields', () => { - // skipped: tests are disabled for the time being - test.skip('create: only required params', async () => { - const responsePromise = client.shields.create({ shield_id: 'shield_id' }); + test('retrieve', async () => { + const responsePromise = client.shields.retrieve('identifier'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -20,19 +17,15 @@ describe('resource shields', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('create: required and optional params', async () => { - const response = await client.shields.create({ - shield_id: 'shield_id', - params: { foo: true }, - provider_id: 'provider_id', - provider_shield_id: 'provider_shield_id', - }); + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.shields.retrieve('identifier', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); }); - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { - const responsePromise = client.shields.retrieve('identifier'); + test('list', async () => { + const responsePromise = client.shields.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const 
response = await responsePromise; @@ -42,9 +35,15 @@ describe('resource shields', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('list', async () => { - const responsePromise = client.shields.list(); + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.shields.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('register: only required params', async () => { + const responsePromise = client.shields.register({ shield_id: 'shield_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -53,4 +52,13 @@ describe('resource shields', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); + + test('register: required and optional params', async () => { + const response = await client.shields.register({ + shield_id: 'shield_id', + params: { foo: true }, + provider_id: 'provider_id', + provider_shield_id: 'provider_shield_id', + }); + }); }); diff --git a/tests/api-resources/synthetic-data-generation.test.ts b/tests/api-resources/synthetic-data-generation.test.ts index 01fc99e..ce0c6cb 100644 --- a/tests/api-resources/synthetic-data-generation.test.ts +++ b/tests/api-resources/synthetic-data-generation.test.ts @@ -1,15 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource syntheticDataGeneration', () => { - // skipped: tests are disabled for the time being - test.skip('generate: only required params', async () => { + test('generate: only required params', async () => { const responsePromise = client.syntheticDataGeneration.generate({ dialogs: [{ content: 'string', role: 'user' }], filtering_function: 'none', @@ -23,8 +20,7 @@ describe('resource syntheticDataGeneration', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('generate: required and optional params', async () => { + test('generate: required and optional params', async () => { const response = await client.syntheticDataGeneration.generate({ dialogs: [{ content: 'string', role: 'user', context: 'string' }], filtering_function: 'none', diff --git a/tests/api-resources/telemetry.test.ts b/tests/api-resources/telemetry.test.ts new file mode 100644 index 0000000..0c2b8e0 --- /dev/null +++ b/tests/api-resources/telemetry.test.ts @@ -0,0 +1,151 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource telemetry', () => { + test('getSpan', async () => { + const responsePromise = client.telemetry.getSpan('trace_id', 'span_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('getSpan: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.telemetry.getSpan('trace_id', 'span_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('getSpanTree', async () => { + const responsePromise = client.telemetry.getSpanTree('span_id', {}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('getTrace', async () => { + const responsePromise = client.telemetry.getTrace('trace_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('getTrace: request options instead of params are passed correctly', async () => { + // ensure the request options are being 
passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.telemetry.getTrace('trace_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('logEvent: only required params', async () => { + const responsePromise = client.telemetry.logEvent({ + event: { + message: 'message', + severity: 'verbose', + span_id: 'span_id', + timestamp: '2019-12-27T18:11:19.117Z', + trace_id: 'trace_id', + type: 'unstructured_log', + }, + ttl_seconds: 0, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('logEvent: required and optional params', async () => { + const response = await client.telemetry.logEvent({ + event: { + message: 'message', + severity: 'verbose', + span_id: 'span_id', + timestamp: '2019-12-27T18:11:19.117Z', + trace_id: 'trace_id', + type: 'unstructured_log', + attributes: { foo: 'string' }, + }, + ttl_seconds: 0, + }); + }); + + // unsupported query params in java / kotlin + test.skip('querySpans: only required params', async () => { + const responsePromise = client.telemetry.querySpans({ + attribute_filters: [{ key: 'key', op: 'eq', value: true }], + attributes_to_return: ['string'], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + // unsupported query params in java / kotlin + test.skip('querySpans: required and 
optional params', async () => { + const response = await client.telemetry.querySpans({ + attribute_filters: [{ key: 'key', op: 'eq', value: true }], + attributes_to_return: ['string'], + max_depth: 0, + }); + }); + + // unsupported query params in java / kotlin + test.skip('queryTraces', async () => { + const responsePromise = client.telemetry.queryTraces({}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('saveSpansToDataset: only required params', async () => { + const responsePromise = client.telemetry.saveSpansToDataset({ + attribute_filters: [{ key: 'key', op: 'eq', value: true }], + attributes_to_save: ['string'], + dataset_id: 'dataset_id', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('saveSpansToDataset: required and optional params', async () => { + const response = await client.telemetry.saveSpansToDataset({ + attribute_filters: [{ key: 'key', op: 'eq', value: true }], + attributes_to_save: ['string'], + dataset_id: 'dataset_id', + max_depth: 0, + }); + }); +}); diff --git a/tests/api-resources/telemetry/spans.test.ts b/tests/api-resources/telemetry/spans.test.ts deleted file mode 100644 index a9d100e..0000000 --- a/tests/api-resources/telemetry/spans.test.ts +++ /dev/null @@ -1,72 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); - -describe('resource spans', () => { - // skipped: tests are disabled for the time being - test.skip('create: only required params', async () => { - const responsePromise = client.telemetry.spans.create({ - attribute_filters: [{ key: 'key', op: 'eq', value: true }], - attributes_to_return: ['string'], - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('create: required and optional params', async () => { - const response = await client.telemetry.spans.create({ - attribute_filters: [{ key: 'key', op: 'eq', value: true }], - attributes_to_return: ['string'], - max_depth: 0, - }); - }); - - // skipped: tests are disabled for the time being - test.skip('buildTree', async () => { - const responsePromise = client.telemetry.spans.buildTree('span_id', {}); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('export: only required params', async () => { - const responsePromise = client.telemetry.spans.export({ - attribute_filters: [{ key: 'key', op: 'eq', value: true }], - attributes_to_save: ['string'], - dataset_id: 
'dataset_id', - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('export: required and optional params', async () => { - const response = await client.telemetry.spans.export({ - attribute_filters: [{ key: 'key', op: 'eq', value: true }], - attributes_to_save: ['string'], - dataset_id: 'dataset_id', - max_depth: 0, - }); - }); -}); diff --git a/tests/api-resources/telemetry/telemetry.test.ts b/tests/api-resources/telemetry/telemetry.test.ts deleted file mode 100644 index d925952..0000000 --- a/tests/api-resources/telemetry/telemetry.test.ts +++ /dev/null @@ -1,48 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', -}); - -describe('resource telemetry', () => { - // skipped: tests are disabled for the time being - test.skip('createEvent: only required params', async () => { - const responsePromise = client.telemetry.createEvent({ - event: { - message: 'message', - severity: 'verbose', - span_id: 'span_id', - timestamp: '2019-12-27T18:11:19.117Z', - trace_id: 'trace_id', - type: 'unstructured_log', - }, - ttl_seconds: 0, - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('createEvent: required and optional params', async () => { - const response = await client.telemetry.createEvent({ - event: { - message: 'message', - severity: 'verbose', - span_id: 'span_id', - timestamp: '2019-12-27T18:11:19.117Z', - trace_id: 'trace_id', - type: 'unstructured_log', - attributes: { foo: 'string' }, - }, - ttl_seconds: 0, - }); - }); -}); diff --git a/tests/api-resources/telemetry/traces.test.ts b/tests/api-resources/telemetry/traces.test.ts deleted file mode 100644 index 56cdac2..0000000 --- a/tests/api-resources/telemetry/traces.test.ts +++ /dev/null @@ -1,51 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', -}); - -describe('resource traces', () => { - // skipped: tests are disabled for the time being - test.skip('create', async () => { - const responsePromise = client.telemetry.traces.create({}); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('retrieveSpan: only required params', async () => { - const responsePromise = client.telemetry.traces.retrieveSpan('span_id', { trace_id: 'trace_id' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // skipped: tests are disabled for the time being - test.skip('retrieveSpan: required and optional params', async () => { - const response = await client.telemetry.traces.retrieveSpan('span_id', { trace_id: 'trace_id' }); - }); - - // skipped: tests are disabled for the time being - test.skip('retrieveTrace', async () => { - const responsePromise = client.telemetry.traces.retrieveTrace('trace_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); -}); diff --git 
a/tests/api-resources/tool-runtime/rag-tool.test.ts b/tests/api-resources/tool-runtime/rag-tool.test.ts index fcc88ba..fa8432b 100644 --- a/tests/api-resources/tool-runtime/rag-tool.test.ts +++ b/tests/api-resources/tool-runtime/rag-tool.test.ts @@ -1,16 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource ragTool', () => { - // skipped: tests are disabled for the time being - test.skip('insertDocuments: only required params', async () => { - const responsePromise = client.toolRuntime.ragTool.insertDocuments({ + test('insert: only required params', async () => { + const responsePromise = client.toolRuntime.ragTool.insert({ chunk_size_in_tokens: 0, documents: [{ content: 'string', document_id: 'document_id', metadata: { foo: true } }], vector_db_id: 'vector_db_id', @@ -24,9 +21,8 @@ describe('resource ragTool', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('insertDocuments: required and optional params', async () => { - const response = await client.toolRuntime.ragTool.insertDocuments({ + test('insert: required and optional params', async () => { + const response = await client.toolRuntime.ragTool.insert({ chunk_size_in_tokens: 0, documents: [ { content: 'string', document_id: 'document_id', metadata: { foo: true }, mime_type: 'mime_type' }, @@ -35,9 +31,8 @@ describe('resource ragTool', () => { }); }); - // skipped: tests are disabled for the time being - test.skip('queryContext: only required params', async () => { - const responsePromise = client.toolRuntime.ragTool.queryContext({ + 
test('query: only required params', async () => { + const responsePromise = client.toolRuntime.ragTool.query({ content: 'string', vector_db_ids: ['string'], }); @@ -50,15 +45,17 @@ describe('resource ragTool', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('queryContext: required and optional params', async () => { - const response = await client.toolRuntime.ragTool.queryContext({ + test('query: required and optional params', async () => { + const response = await client.toolRuntime.ragTool.query({ content: 'string', vector_db_ids: ['string'], query_config: { + chunk_template: 'chunk_template', max_chunks: 0, max_tokens_in_context: 0, query_generator_config: { separator: 'separator', type: 'default' }, + mode: 'mode', + ranker: { impact_factor: 0, type: 'rrf' }, }, }); }); diff --git a/tests/api-resources/tool-runtime/tool-runtime.test.ts b/tests/api-resources/tool-runtime/tool-runtime.test.ts index 230b6fc..71e4f41 100644 --- a/tests/api-resources/tool-runtime/tool-runtime.test.ts +++ b/tests/api-resources/tool-runtime/tool-runtime.test.ts @@ -1,15 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource toolRuntime', () => { - // skipped: tests are disabled for the time being - test.skip('invokeTool: only required params', async () => { + test('invokeTool: only required params', async () => { const responsePromise = client.toolRuntime.invokeTool({ kwargs: { foo: true }, tool_name: 'tool_name' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -20,13 +17,11 @@ describe('resource toolRuntime', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('invokeTool: required and optional params', async () => { + test('invokeTool: required and optional params', async () => { const response = await client.toolRuntime.invokeTool({ kwargs: { foo: true }, tool_name: 'tool_name' }); }); - // skipped: tests are disabled for the time being - test.skip('listTools', async () => { + test('listTools', async () => { const responsePromise = client.toolRuntime.listTools(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -37,8 +32,14 @@ describe('resource toolRuntime', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('listTools: request options and params are passed correctly', async () => { + test('listTools: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.toolRuntime.listTools({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('listTools: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( 
client.toolRuntime.listTools( diff --git a/tests/api-resources/toolgroups.test.ts b/tests/api-resources/toolgroups.test.ts index 9e03490..2bb4891 100644 --- a/tests/api-resources/toolgroups.test.ts +++ b/tests/api-resources/toolgroups.test.ts @@ -1,16 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource toolgroups', () => { - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { - const responsePromise = client.toolgroups.retrieve('toolgroup_id'); + test('list', async () => { + const responsePromise = client.toolgroups.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -20,9 +17,15 @@ describe('resource toolgroups', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('list', async () => { - const responsePromise = client.toolgroups.list(); + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.toolgroups.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('get', async () => { + const responsePromise = client.toolgroups.get('toolgroup_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -32,8 +35,14 @@ describe('resource toolgroups', 
() => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('register: only required params', async () => { + test('get: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.toolgroups.get('toolgroup_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('register: only required params', async () => { const responsePromise = client.toolgroups.register({ provider_id: 'provider_id', toolgroup_id: 'toolgroup_id', @@ -47,8 +56,7 @@ describe('resource toolgroups', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('register: required and optional params', async () => { + test('register: required and optional params', async () => { const response = await client.toolgroups.register({ provider_id: 'provider_id', toolgroup_id: 'toolgroup_id', @@ -57,8 +65,7 @@ describe('resource toolgroups', () => { }); }); - // skipped: tests are disabled for the time being - test.skip('unregister', async () => { + test('unregister', async () => { const responsePromise = client.toolgroups.unregister('toolgroup_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -68,4 +75,11 @@ describe('resource toolgroups', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); + + test('unregister: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.toolgroups.unregister('toolgroup_id', { path: '/_stainless_unknown_path' }), + 
).rejects.toThrow(LlamaStackClient.NotFoundError); + }); }); diff --git a/tests/api-resources/tools.test.ts b/tests/api-resources/tools.test.ts index b534c7d..5a79e49 100644 --- a/tests/api-resources/tools.test.ts +++ b/tests/api-resources/tools.test.ts @@ -1,16 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource tools', () => { - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { - const responsePromise = client.tools.retrieve('tool_name'); + test('list', async () => { + const responsePromise = client.tools.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -20,9 +17,22 @@ describe('resource tools', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('list', async () => { - const responsePromise = client.tools.list(); + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.tools.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.tools.list({ toolgroup_id: 'toolgroup_id' }, { path: '/_stainless_unknown_path' }), + 
).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('get', async () => { + const responsePromise = client.tools.get('tool_name'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -32,11 +42,10 @@ describe('resource tools', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('list: request options and params are passed correctly', async () => { + test('get: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.tools.list({ toolgroup_id: 'toolgroup_id' }, { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(LlamaStackClient.NotFoundError); + await expect(client.tools.get('tool_name', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); }); }); diff --git a/tests/api-resources/vector-dbs.test.ts b/tests/api-resources/vector-dbs.test.ts index 8d9bc1f..dc27b0a 100644 --- a/tests/api-resources/vector-dbs.test.ts +++ b/tests/api-resources/vector-dbs.test.ts @@ -1,19 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource vectorDBs', () => { - // skipped: tests are disabled for the time being - test.skip('create: only required params', async () => { - const responsePromise = client.vectorDBs.create({ - embedding_model: 'embedding_model', - vector_db_id: 'vector_db_id', - }); + test('retrieve', async () => { + const responsePromise = client.vectorDBs.retrieve('vector_db_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -23,20 +17,15 @@ describe('resource vectorDBs', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('create: required and optional params', async () => { - const response = await client.vectorDBs.create({ - embedding_model: 'embedding_model', - vector_db_id: 'vector_db_id', - embedding_dimension: 0, - provider_id: 'provider_id', - provider_vector_db_id: 'provider_vector_db_id', - }); + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorDBs.retrieve('vector_db_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); }); - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { - const responsePromise = client.vectorDBs.retrieve('vector_db_id'); + test('list', async () => { + const responsePromise = client.vectorDBs.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -46,9 +35,18 @@ describe('resource vectorDBs', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('list', async () => { - const responsePromise = 
client.vectorDBs.list(); + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.vectorDBs.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('register: only required params', async () => { + const responsePromise = client.vectorDBs.register({ + embedding_model: 'embedding_model', + vector_db_id: 'vector_db_id', + }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -58,9 +56,18 @@ describe('resource vectorDBs', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('delete', async () => { - const responsePromise = client.vectorDBs.delete('vector_db_id'); + test('register: required and optional params', async () => { + const response = await client.vectorDBs.register({ + embedding_model: 'embedding_model', + vector_db_id: 'vector_db_id', + embedding_dimension: 0, + provider_id: 'provider_id', + provider_vector_db_id: 'provider_vector_db_id', + }); + }); + + test('unregister', async () => { + const responsePromise = client.vectorDBs.unregister('vector_db_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -69,4 +76,11 @@ describe('resource vectorDBs', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); + + test('unregister: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorDBs.unregister('vector_db_id', { path: '/_stainless_unknown_path' }), + 
).rejects.toThrow(LlamaStackClient.NotFoundError); + }); }); diff --git a/tests/api-resources/vector-io.test.ts b/tests/api-resources/vector-io.test.ts index 7138813..51380d7 100644 --- a/tests/api-resources/vector-io.test.ts +++ b/tests/api-resources/vector-io.test.ts @@ -1,15 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource vectorIo', () => { - // skipped: tests are disabled for the time being - test.skip('insert: only required params', async () => { + test('insert: only required params', async () => { const responsePromise = client.vectorIo.insert({ chunks: [{ content: 'string', metadata: { foo: true } }], vector_db_id: 'vector_db_id', @@ -23,17 +20,35 @@ describe('resource vectorIo', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('insert: required and optional params', async () => { + test('insert: required and optional params', async () => { const response = await client.vectorIo.insert({ - chunks: [{ content: 'string', metadata: { foo: true } }], + chunks: [ + { + content: 'string', + metadata: { foo: true }, + chunk_metadata: { + chunk_embedding_dimension: 0, + chunk_embedding_model: 'chunk_embedding_model', + chunk_id: 'chunk_id', + chunk_tokenizer: 'chunk_tokenizer', + chunk_window: 'chunk_window', + content_token_count: 0, + created_timestamp: 0, + document_id: 'document_id', + metadata_token_count: 0, + source: 'source', + updated_timestamp: 0, + }, + embedding: [0], + stored_chunk_id: 'stored_chunk_id', + }, + ], vector_db_id: 'vector_db_id', ttl_seconds: 0, }); }); - // 
skipped: tests are disabled for the time being - test.skip('query: only required params', async () => { + test('query: only required params', async () => { const responsePromise = client.vectorIo.query({ query: 'string', vector_db_id: 'vector_db_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -44,8 +59,7 @@ describe('resource vectorIo', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // skipped: tests are disabled for the time being - test.skip('query: required and optional params', async () => { + test('query: required and optional params', async () => { const response = await client.vectorIo.query({ query: 'string', vector_db_id: 'vector_db_id', diff --git a/tests/api-resources/vector-stores/files.test.ts b/tests/api-resources/vector-stores/files.test.ts new file mode 100644 index 0000000..1f6c452 --- /dev/null +++ b/tests/api-resources/vector-stores/files.test.ts @@ -0,0 +1,129 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource files', () => { + test('create: only required params', async () => { + const responsePromise = client.vectorStores.files.create('vector_store_id', { file_id: 'file_id' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.vectorStores.files.create('vector_store_id', { + file_id: 'file_id', + attributes: { foo: true }, + chunking_strategy: { type: 'auto' }, + }); + }); + + test('retrieve', async () => { + const responsePromise = client.vectorStores.files.retrieve('vector_store_id', 'file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.retrieve('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('update: only required params', async () => { + const responsePromise = client.vectorStores.files.update('vector_store_id', 'file_id', { + attributes: { foo: true }, + }); + const rawResponse = await responsePromise.asResponse(); + 
expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('update: required and optional params', async () => { + const response = await client.vectorStores.files.update('vector_store_id', 'file_id', { + attributes: { foo: true }, + }); + }); + + test('list', async () => { + const responsePromise = client.vectorStores.files.list('vector_store_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.list( + 'vector_store_id', + { after: 'after', before: 'before', filter: 'completed', limit: 0, order: 'order' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('delete', async () => { + const responsePromise = client.vectorStores.files.delete('vector_store_id', 'file_id'); + const rawResponse = await responsePromise.asResponse(); + 
expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('delete: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.delete('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('content', async () => { + const responsePromise = client.vectorStores.files.content('vector_store_id', 'file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('content: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.content('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); +}); diff --git a/tests/api-resources/vector-stores/vector-stores.test.ts b/tests/api-resources/vector-stores/vector-stores.test.ts new file mode 100644 index 0000000..a34c1cb --- /dev/null +++ b/tests/api-resources/vector-stores/vector-stores.test.ts @@ -0,0 +1,130 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); + +describe('resource vectorStores', () => { + test('create: only required params', async () => { + const responsePromise = client.vectorStores.create({ name: 'name' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.vectorStores.create({ + name: 'name', + chunking_strategy: { foo: true }, + embedding_dimension: 0, + embedding_model: 'embedding_model', + expires_after: { foo: true }, + file_ids: ['string'], + metadata: { foo: true }, + provider_id: 'provider_id', + provider_vector_db_id: 'provider_vector_db_id', + }); + }); + + test('retrieve', async () => { + const responsePromise = client.vectorStores.retrieve('vector_store_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }), + 
).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('update', async () => { + const responsePromise = client.vectorStores.update('vector_store_id', {}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list', async () => { + const responsePromise = client.vectorStores.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.list( + { after: 'after', before: 'before', limit: 0, order: 'order' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('delete', async () => { + const responsePromise = client.vectorStores.delete('vector_store_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await 
responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('delete: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.delete('vector_store_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('search: only required params', async () => { + const responsePromise = client.vectorStores.search('vector_store_id', { query: 'string' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('search: required and optional params', async () => { + const response = await client.vectorStores.search('vector_store_id', { + query: 'string', + filters: { foo: true }, + max_num_results: 0, + ranking_options: { ranker: 'ranker', score_threshold: 0 }, + rewrite_query: true, + search_mode: 'search_mode', + }); + }); +}); diff --git a/tests/api-resources/version.test.ts b/tests/api-resources/version.test.ts deleted file mode 100644 index c53ff8a..0000000 --- a/tests/api-resources/version.test.ts +++ /dev/null @@ -1,22 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', -}); - -describe('resource version', () => { - // skipped: tests are disabled for the time being - test.skip('retrieve', async () => { - const responsePromise = client.version.retrieve(); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); -}); diff --git a/tests/base64.test.ts b/tests/base64.test.ts deleted file mode 100644 index fed0ece..0000000 --- a/tests/base64.test.ts +++ /dev/null @@ -1,80 +0,0 @@ -import { fromBase64, toBase64 } from 'llama-stack-client/internal/utils/base64'; - -describe.each(['Buffer', 'atob'])('with %s', (mode) => { - let originalBuffer: BufferConstructor; - beforeAll(() => { - if (mode === 'atob') { - originalBuffer = globalThis.Buffer; - // @ts-expect-error Can't assign undefined to BufferConstructor - delete globalThis.Buffer; - } - }); - afterAll(() => { - if (mode === 'atob') { - globalThis.Buffer = originalBuffer; - } - }); - test('toBase64', () => { - const testCases = [ - { - input: 'hello world', - expected: 'aGVsbG8gd29ybGQ=', - }, - { - input: new Uint8Array([104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100]), - expected: 'aGVsbG8gd29ybGQ=', - }, - { - input: undefined, - expected: '', - }, - { - input: new Uint8Array([ - 229, 102, 215, 230, 65, 22, 46, 87, 243, 176, 99, 99, 31, 174, 8, 242, 83, 142, 169, 64, 122, 123, - 193, 71, - ]), - expected: '5WbX5kEWLlfzsGNjH64I8lOOqUB6e8FH', - }, - { - input: '✓', - expected: '4pyT', - }, - { - input: new Uint8Array([226, 156, 147]), - expected: '4pyT', - }, - ]; - - testCases.forEach(({ input, expected }) => { - expect(toBase64(input)).toBe(expected); - }); - }); - - test('fromBase64', () => { - const testCases = [ - { - input: 
'aGVsbG8gd29ybGQ=', - expected: new Uint8Array([104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100]), - }, - { - input: '', - expected: new Uint8Array([]), - }, - { - input: '5WbX5kEWLlfzsGNjH64I8lOOqUB6e8FH', - expected: new Uint8Array([ - 229, 102, 215, 230, 65, 22, 46, 87, 243, 176, 99, 99, 31, 174, 8, 242, 83, 142, 169, 64, 122, 123, - 193, 71, - ]), - }, - { - input: '4pyT', - expected: new Uint8Array([226, 156, 147]), - }, - ]; - - testCases.forEach(({ input, expected }) => { - expect(fromBase64(input)).toEqual(expected); - }); - }); -}); diff --git a/tests/buildHeaders.test.ts b/tests/buildHeaders.test.ts deleted file mode 100644 index 4a3c473..0000000 --- a/tests/buildHeaders.test.ts +++ /dev/null @@ -1,88 +0,0 @@ -import { inspect } from 'node:util'; -import { buildHeaders, type HeadersLike, type NullableHeaders } from 'llama-stack-client/internal/headers'; - -function inspectNullableHeaders(headers: NullableHeaders) { - return `NullableHeaders {${[ - ...[...headers.values.entries()].map(([name, value]) => ` ${inspect(name)}: ${inspect(value)}`), - ...[...headers.nulls].map((name) => ` ${inspect(name)}: null`), - ].join(', ')} }`; -} - -describe('buildHeaders', () => { - const cases: [HeadersLike[], string][] = [ - [[new Headers({ 'content-type': 'text/plain' })], `NullableHeaders { 'content-type': 'text/plain' }`], - [ - [ - { - 'content-type': 'text/plain', - }, - { - 'Content-Type': undefined, - }, - ], - `NullableHeaders { 'content-type': 'text/plain' }`, - ], - [ - [ - { - 'content-type': 'text/plain', - }, - { - 'Content-Type': null, - }, - ], - `NullableHeaders { 'content-type': null }`, - ], - [ - [ - { - cookie: 'name1=value1', - Cookie: 'name2=value2', - }, - ], - `NullableHeaders { 'cookie': 'name2=value2' }`, - ], - [ - [ - { - cookie: 'name1=value1', - Cookie: undefined, - }, - ], - `NullableHeaders { 'cookie': 'name1=value1' }`, - ], - [ - [ - { - cookie: ['name1=value1', 'name2=value2'], - }, - ], - `NullableHeaders { 'cookie': 
'name1=value1; name2=value2' }`, - ], - [ - [ - { - 'x-foo': ['name1=value1', 'name2=value2'], - }, - ], - `NullableHeaders { 'x-foo': 'name1=value1, name2=value2' }`, - ], - [ - [ - [ - ['cookie', 'name1=value1'], - ['cookie', 'name2=value2'], - ['Cookie', 'name3=value3'], - ], - ], - `NullableHeaders { 'cookie': 'name1=value1; name2=value2; name3=value3' }`, - ], - [[undefined], `NullableHeaders { }`], - [[null], `NullableHeaders { }`], - ]; - for (const [input, expected] of cases) { - test(expected, () => { - expect(inspectNullableHeaders(buildHeaders(input))).toEqual(expected); - }); - } -}); diff --git a/tests/form.test.ts b/tests/form.test.ts index 2ecbda8..b23a617 100644 --- a/tests/form.test.ts +++ b/tests/form.test.ts @@ -1,85 +1,65 @@ -import { multipartFormRequestOptions, createForm } from 'llama-stack-client/internal/uploads'; -import { toFile } from 'llama-stack-client/core/uploads'; +import { multipartFormRequestOptions, createForm } from 'llama-stack-client/core'; +import { Blob } from 'llama-stack-client/_shims/index'; +import { toFile } from 'llama-stack-client'; describe('form data validation', () => { test('valid values do not error', async () => { - await multipartFormRequestOptions( - { - body: { - foo: 'foo', - string: 1, - bool: true, - file: await toFile(Buffer.from('some-content')), - blob: new Blob(['Some content'], { type: 'text/plain' }), - }, + await multipartFormRequestOptions({ + body: { + foo: 'foo', + string: 1, + bool: true, + file: await toFile(Buffer.from('some-content')), + blob: new Blob(['Some content'], { type: 'text/plain' }), }, - fetch, - ); + }); }); test('null', async () => { await expect(() => - multipartFormRequestOptions( - { - body: { - null: null, - }, + multipartFormRequestOptions({ + body: { + null: null, }, - fetch, - ), + }), ).rejects.toThrow(TypeError); }); test('undefined is stripped', async () => { - const form = await createForm( - { - foo: undefined, - bar: 'baz', - }, - fetch, - ); + const form = await 
createForm({ + foo: undefined, + bar: 'baz', + }); expect(form.has('foo')).toBe(false); expect(form.get('bar')).toBe('baz'); }); test('nested undefined property is stripped', async () => { - const form = await createForm( - { - bar: { - baz: undefined, - }, + const form = await createForm({ + bar: { + baz: undefined, }, - fetch, - ); + }); expect(Array.from(form.entries())).toEqual([]); - const form2 = await createForm( - { - bar: { - foo: 'string', - baz: undefined, - }, + const form2 = await createForm({ + bar: { + foo: 'string', + baz: undefined, }, - fetch, - ); + }); expect(Array.from(form2.entries())).toEqual([['bar[foo]', 'string']]); }); test('nested undefined array item is stripped', async () => { - const form = await createForm( - { - bar: [undefined, undefined], - }, - fetch, - ); + const form = await createForm({ + bar: [undefined, undefined], + }); expect(Array.from(form.entries())).toEqual([]); - const form2 = await createForm( - { - bar: [undefined, 'foo'], - }, - fetch, - ); + const form2 = await createForm({ + bar: [undefined, 'foo'], + }); expect(Array.from(form2.entries())).toEqual([['bar[]', 'foo']]); }); }); diff --git a/tests/index.test.ts b/tests/index.test.ts index ae4e9a9..eca2222 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -1,11 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIPromise } from 'llama-stack-client/core/api-promise'; - -import util from 'node:util'; import LlamaStackClient from 'llama-stack-client'; import { APIUserAbortError } from 'llama-stack-client'; -const defaultFetch = fetch; +import { Headers } from 'llama-stack-client/core'; +import defaultFetch, { Response, type RequestInit, type RequestInfo } from 'node-fetch'; describe('instantiate client', () => { const env = process.env; @@ -13,6 +11,8 @@ describe('instantiate client', () => { beforeEach(() => { jest.resetModules(); process.env = { ...env }; + + console.warn = jest.fn(); }); afterEach(() => { @@ -23,12 +23,11 @@ describe('instantiate client', () => { const client = new LlamaStackClient({ baseURL: 'http://localhost:5000/', defaultHeaders: { 'X-My-Default-Header': '2' }, - apiKey: 'My API Key', }); test('they are used in the request', () => { const { req } = client.buildRequest({ path: '/foo', method: 'post' }); - expect(req.headers.get('x-my-default-header')).toEqual('2'); + expect((req.headers as Headers)['x-my-default-header']).toEqual('2'); }); test('can ignore `undefined` and leave the default', () => { @@ -37,7 +36,7 @@ describe('instantiate client', () => { method: 'post', headers: { 'X-My-Default-Header': undefined }, }); - expect(req.headers.get('x-my-default-header')).toEqual('2'); + expect((req.headers as Headers)['x-my-default-header']).toEqual('2'); }); test('can be removed with `null`', () => { @@ -46,136 +45,7 @@ describe('instantiate client', () => { method: 'post', headers: { 'X-My-Default-Header': null }, }); - expect(req.headers.has('x-my-default-header')).toBe(false); - }); - }); - describe('logging', () => { - const env = process.env; - - beforeEach(() => { - process.env = { ...env }; - process.env['LLAMA_STACK_CLIENT_LOG'] = undefined; - }); - - afterEach(() => { - process.env = env; - }); - - const forceAPIResponseForClient = async (client: LlamaStackClient) => { - await new APIPromise( - client, - Promise.resolve({ - response: 
new Response(), - controller: new AbortController(), - requestLogID: 'log_000000', - retryOfRequestLogID: undefined, - startTime: Date.now(), - options: { - method: 'get', - path: '/', - }, - }), - ); - }; - - test('debug logs when log level is debug', async () => { - const debugMock = jest.fn(); - const logger = { - debug: debugMock, - info: jest.fn(), - warn: jest.fn(), - error: jest.fn(), - }; - - const client = new LlamaStackClient({ logger: logger, logLevel: 'debug', apiKey: 'My API Key' }); - - await forceAPIResponseForClient(client); - expect(debugMock).toHaveBeenCalled(); - }); - - test('default logLevel is warn', async () => { - const client = new LlamaStackClient({ apiKey: 'My API Key' }); - expect(client.logLevel).toBe('warn'); - }); - - test('debug logs are skipped when log level is info', async () => { - const debugMock = jest.fn(); - const logger = { - debug: debugMock, - info: jest.fn(), - warn: jest.fn(), - error: jest.fn(), - }; - - const client = new LlamaStackClient({ logger: logger, logLevel: 'info', apiKey: 'My API Key' }); - - await forceAPIResponseForClient(client); - expect(debugMock).not.toHaveBeenCalled(); - }); - - test('debug logs happen with debug env var', async () => { - const debugMock = jest.fn(); - const logger = { - debug: debugMock, - info: jest.fn(), - warn: jest.fn(), - error: jest.fn(), - }; - - process.env['LLAMA_STACK_CLIENT_LOG'] = 'debug'; - const client = new LlamaStackClient({ logger: logger, apiKey: 'My API Key' }); - expect(client.logLevel).toBe('debug'); - - await forceAPIResponseForClient(client); - expect(debugMock).toHaveBeenCalled(); - }); - - test('warn when env var level is invalid', async () => { - const warnMock = jest.fn(); - const logger = { - debug: jest.fn(), - info: jest.fn(), - warn: warnMock, - error: jest.fn(), - }; - - process.env['LLAMA_STACK_CLIENT_LOG'] = 'not a log level'; - const client = new LlamaStackClient({ logger: logger, apiKey: 'My API Key' }); - expect(client.logLevel).toBe('warn'); - 
expect(warnMock).toHaveBeenCalledWith( - 'process.env[\'LLAMA_STACK_CLIENT_LOG\'] was set to "not a log level", expected one of ["off","error","warn","info","debug"]', - ); - }); - - test('client log level overrides env var', async () => { - const debugMock = jest.fn(); - const logger = { - debug: debugMock, - info: jest.fn(), - warn: jest.fn(), - error: jest.fn(), - }; - - process.env['LLAMA_STACK_CLIENT_LOG'] = 'debug'; - const client = new LlamaStackClient({ logger: logger, logLevel: 'off', apiKey: 'My API Key' }); - - await forceAPIResponseForClient(client); - expect(debugMock).not.toHaveBeenCalled(); - }); - - test('no warning logged for invalid env var level + valid client level', async () => { - const warnMock = jest.fn(); - const logger = { - debug: jest.fn(), - info: jest.fn(), - warn: warnMock, - error: jest.fn(), - }; - - process.env['LLAMA_STACK_CLIENT_LOG'] = 'not a log level'; - const client = new LlamaStackClient({ logger: logger, logLevel: 'debug', apiKey: 'My API Key' }); - expect(client.logLevel).toBe('debug'); - expect(warnMock).not.toHaveBeenCalled(); + expect(req.headers as Headers).not.toHaveProperty('x-my-default-header'); }); }); @@ -184,7 +54,6 @@ describe('instantiate client', () => { const client = new LlamaStackClient({ baseURL: 'http://localhost:5000/', defaultQuery: { apiVersion: 'foo' }, - apiKey: 'My API Key', }); expect(client.buildURL('/foo', null)).toEqual('http://localhost:5000/foo?apiVersion=foo'); }); @@ -193,7 +62,6 @@ describe('instantiate client', () => { const client = new LlamaStackClient({ baseURL: 'http://localhost:5000/', defaultQuery: { apiVersion: 'foo', hello: 'world' }, - apiKey: 'My API Key', }); expect(client.buildURL('/foo', null)).toEqual('http://localhost:5000/foo?apiVersion=foo&hello=world'); }); @@ -202,7 +70,6 @@ describe('instantiate client', () => { const client = new LlamaStackClient({ baseURL: 'http://localhost:5000/', defaultQuery: { hello: 'world' }, - apiKey: 'My API Key', }); 
expect(client.buildURL('/foo', { hello: undefined })).toEqual('http://localhost:5000/foo'); }); @@ -211,7 +78,6 @@ describe('instantiate client', () => { test('custom fetch', async () => { const client = new LlamaStackClient({ baseURL: 'http://localhost:5000/', - apiKey: 'My API Key', fetch: (url) => { return Promise.resolve( new Response(JSON.stringify({ url, custom: true }), { @@ -227,17 +93,12 @@ describe('instantiate client', () => { test('explicit global fetch', async () => { // make sure the global fetch type is assignable to our Fetch type - const client = new LlamaStackClient({ - baseURL: 'http://localhost:5000/', - apiKey: 'My API Key', - fetch: defaultFetch, - }); + const client = new LlamaStackClient({ baseURL: 'http://localhost:5000/', fetch: defaultFetch }); }); test('custom signal', async () => { const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', - apiKey: 'My API Key', fetch: (...args) => { return new Promise((resolve, reject) => setTimeout( @@ -262,16 +123,12 @@ describe('instantiate client', () => { test('normalized method', async () => { let capturedRequest: RequestInit | undefined; - const testFetch = async (url: string | URL | Request, init: RequestInit = {}): Promise => { + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { capturedRequest = init; return new Response(JSON.stringify({}), { headers: { 'Content-Type': 'application/json' } }); }; - const client = new LlamaStackClient({ - baseURL: 'http://localhost:5000/', - apiKey: 'My API Key', - fetch: testFetch, - }); + const client = new LlamaStackClient({ baseURL: 'http://localhost:5000/', fetch: testFetch }); await client.patch('/foo'); expect(capturedRequest?.method).toEqual('PATCH'); @@ -279,18 +136,12 @@ describe('instantiate client', () => { describe('baseUrl', () => { test('trailing slash', () => { - const client = new LlamaStackClient({ - baseURL: 'http://localhost:5000/custom/path/', - apiKey: 'My 
API Key', - }); + const client = new LlamaStackClient({ baseURL: 'http://localhost:5000/custom/path/' }); expect(client.buildURL('/foo', null)).toEqual('http://localhost:5000/custom/path/foo'); }); test('no trailing slash', () => { - const client = new LlamaStackClient({ - baseURL: 'http://localhost:5000/custom/path', - apiKey: 'My API Key', - }); + const client = new LlamaStackClient({ baseURL: 'http://localhost:5000/custom/path' }); expect(client.buildURL('/foo', null)).toEqual('http://localhost:5000/custom/path/foo'); }); @@ -299,37 +150,37 @@ describe('instantiate client', () => { }); test('explicit option', () => { - const client = new LlamaStackClient({ baseURL: 'https://example.com', apiKey: 'My API Key' }); + const client = new LlamaStackClient({ baseURL: 'https://example.com' }); expect(client.baseURL).toEqual('https://example.com'); }); test('env variable', () => { process.env['LLAMA_STACK_CLIENT_BASE_URL'] = 'https://example.com/from_env'; - const client = new LlamaStackClient({ apiKey: 'My API Key' }); + const client = new LlamaStackClient({}); expect(client.baseURL).toEqual('https://example.com/from_env'); }); test('empty env variable', () => { process.env['LLAMA_STACK_CLIENT_BASE_URL'] = ''; // empty - const client = new LlamaStackClient({ apiKey: 'My API Key' }); + const client = new LlamaStackClient({}); expect(client.baseURL).toEqual('http://any-hosted-llama-stack.com'); }); test('blank env variable', () => { process.env['LLAMA_STACK_CLIENT_BASE_URL'] = ' '; // blank - const client = new LlamaStackClient({ apiKey: 'My API Key' }); + const client = new LlamaStackClient({}); expect(client.baseURL).toEqual('http://any-hosted-llama-stack.com'); }); test('in request options', () => { - const client = new LlamaStackClient({ apiKey: 'My API Key' }); + const client = new LlamaStackClient({}); expect(client.buildURL('/foo', null, 'http://localhost:5000/option')).toEqual( 'http://localhost:5000/option/foo', ); }); test('in request options overridden by 
client options', () => { - const client = new LlamaStackClient({ apiKey: 'My API Key', baseURL: 'http://localhost:5000/client' }); + const client = new LlamaStackClient({ baseURL: 'http://localhost:5000/client' }); expect(client.buildURL('/foo', null, 'http://localhost:5000/option')).toEqual( 'http://localhost:5000/client/foo', ); @@ -337,7 +188,7 @@ describe('instantiate client', () => { test('in request options overridden by env variable', () => { process.env['LLAMA_STACK_CLIENT_BASE_URL'] = 'http://localhost:5000/env'; - const client = new LlamaStackClient({ apiKey: 'My API Key' }); + const client = new LlamaStackClient({}); expect(client.buildURL('/foo', null, 'http://localhost:5000/option')).toEqual( 'http://localhost:5000/env/foo', ); @@ -345,108 +196,30 @@ describe('instantiate client', () => { }); test('maxRetries option is correctly set', () => { - const client = new LlamaStackClient({ maxRetries: 4, apiKey: 'My API Key' }); + const client = new LlamaStackClient({ maxRetries: 4 }); expect(client.maxRetries).toEqual(4); // default - const client2 = new LlamaStackClient({ apiKey: 'My API Key' }); + const client2 = new LlamaStackClient({}); expect(client2.maxRetries).toEqual(2); }); +}); - describe('withOptions', () => { - test('creates a new client with overridden options', () => { - const client = new LlamaStackClient({ - baseURL: 'http://localhost:5000/', - maxRetries: 3, - apiKey: 'My API Key', - }); - - const newClient = client.withOptions({ - maxRetries: 5, - baseURL: 'http://localhost:5001/', - }); - - // Verify the new client has updated options - expect(newClient.maxRetries).toEqual(5); - expect(newClient.baseURL).toEqual('http://localhost:5001/'); - - // Verify the original client is unchanged - expect(client.maxRetries).toEqual(3); - expect(client.baseURL).toEqual('http://localhost:5000/'); - - // Verify it's a different instance - expect(newClient).not.toBe(client); - expect(newClient.constructor).toBe(client.constructor); - }); - - test('inherits 
options from the parent client', () => { - const client = new LlamaStackClient({ - baseURL: 'http://localhost:5000/', - defaultHeaders: { 'X-Test-Header': 'test-value' }, - defaultQuery: { 'test-param': 'test-value' }, - apiKey: 'My API Key', - }); - - const newClient = client.withOptions({ - baseURL: 'http://localhost:5001/', - }); - - // Test inherited options remain the same - expect(newClient.buildURL('/foo', null)).toEqual('http://localhost:5001/foo?test-param=test-value'); +describe('request building', () => { + const client = new LlamaStackClient({}); - const { req } = newClient.buildRequest({ path: '/foo', method: 'get' }); - expect(req.headers.get('x-test-header')).toEqual('test-value'); + describe('Content-Length', () => { + test('handles multi-byte characters', () => { + const { req } = client.buildRequest({ path: '/foo', method: 'post', body: { value: '—' } }); + expect((req.headers as Record)['content-length']).toEqual('20'); }); - test('respects runtime property changes when creating new client', () => { - const client = new LlamaStackClient({ - baseURL: 'http://localhost:5000/', - timeout: 1000, - apiKey: 'My API Key', - }); - - // Modify the client properties directly after creation - client.baseURL = 'http://localhost:6000/'; - client.timeout = 2000; - - // Create a new client with withOptions - const newClient = client.withOptions({ - maxRetries: 10, - }); - - // Verify the new client uses the updated properties, not the original ones - expect(newClient.baseURL).toEqual('http://localhost:6000/'); - expect(newClient.timeout).toEqual(2000); - expect(newClient.maxRetries).toEqual(10); - - // Original client should still have its modified properties - expect(client.baseURL).toEqual('http://localhost:6000/'); - expect(client.timeout).toEqual(2000); - expect(client.maxRetries).not.toEqual(10); - - // Verify URL building uses the updated baseURL - expect(newClient.buildURL('/bar', null)).toEqual('http://localhost:6000/bar'); + test('handles standard 
characters', () => { + const { req } = client.buildRequest({ path: '/foo', method: 'post', body: { value: 'hello' } }); + expect((req.headers as Record)['content-length']).toEqual('22'); }); }); - test('with environment variable arguments', () => { - // set options via env var - process.env['LLAMA_STACK_CLIENT_API_KEY'] = 'My API Key'; - const client = new LlamaStackClient(); - expect(client.apiKey).toBe('My API Key'); - }); - - test('with overridden environment variable arguments', () => { - // set options via env var - process.env['LLAMA_STACK_CLIENT_API_KEY'] = 'another My API Key'; - const client = new LlamaStackClient({ apiKey: 'My API Key' }); - expect(client.apiKey).toBe('My API Key'); - }); -}); - -describe('request building', () => { - const client = new LlamaStackClient({ apiKey: 'My API Key' }); - describe('custom headers', () => { test('handles undefined', () => { const { req } = client.buildRequest({ @@ -455,92 +228,18 @@ describe('request building', () => { body: { value: 'hello' }, headers: { 'X-Foo': 'baz', 'x-foo': 'bar', 'x-Foo': undefined, 'x-baz': 'bam', 'X-Baz': null }, }); - expect(req.headers.get('x-foo')).toEqual('bar'); - expect(req.headers.get('x-Foo')).toEqual('bar'); - expect(req.headers.get('X-Foo')).toEqual('bar'); - expect(req.headers.get('x-baz')).toEqual(null); + expect((req.headers as Record)['x-foo']).toEqual('bar'); + expect((req.headers as Record)['x-Foo']).toEqual(undefined); + expect((req.headers as Record)['X-Foo']).toEqual(undefined); + expect((req.headers as Record)['x-baz']).toEqual(undefined); }); }); }); -describe('default encoder', () => { - const client = new LlamaStackClient({ apiKey: 'My API Key' }); - - class Serializable { - toJSON() { - return { $type: 'Serializable' }; - } - } - class Collection { - #things: T[]; - constructor(things: T[]) { - this.#things = Array.from(things); - } - toJSON() { - return Array.from(this.#things); - } - [Symbol.iterator]() { - return this.#things[Symbol.iterator]; - } - } - for 
(const jsonValue of [{}, [], { __proto__: null }, new Serializable(), new Collection(['item'])]) { - test(`serializes ${util.inspect(jsonValue)} as json`, () => { - const { req } = client.buildRequest({ - path: '/foo', - method: 'post', - body: jsonValue, - }); - expect(req.headers).toBeInstanceOf(Headers); - expect(req.headers.get('content-type')).toEqual('application/json'); - expect(req.body).toBe(JSON.stringify(jsonValue)); - }); - } - - const encoder = new TextEncoder(); - const asyncIterable = (async function* () { - yield encoder.encode('a\n'); - yield encoder.encode('b\n'); - yield encoder.encode('c\n'); - })(); - for (const streamValue of [ - [encoder.encode('a\nb\nc\n')][Symbol.iterator](), - new Response('a\nb\nc\n').body, - asyncIterable, - ]) { - test(`converts ${util.inspect(streamValue)} to ReadableStream`, async () => { - const { req } = client.buildRequest({ - path: '/foo', - method: 'post', - body: streamValue, - }); - expect(req.headers).toBeInstanceOf(Headers); - expect(req.headers.get('content-type')).toEqual(null); - expect(req.body).toBeInstanceOf(ReadableStream); - expect(await new Response(req.body).text()).toBe('a\nb\nc\n'); - }); - } - - test(`can set content-type for ReadableStream`, async () => { - const { req } = client.buildRequest({ - path: '/foo', - method: 'post', - body: new Response('a\nb\nc\n').body, - headers: { 'Content-Type': 'text/plain' }, - }); - expect(req.headers).toBeInstanceOf(Headers); - expect(req.headers.get('content-type')).toEqual('text/plain'); - expect(req.body).toBeInstanceOf(ReadableStream); - expect(await new Response(req.body).text()).toBe('a\nb\nc\n'); - }); -}); - describe('retries', () => { test('retry on timeout', async () => { let count = 0; - const testFetch = async ( - url: string | URL | Request, - { signal }: RequestInit = {}, - ): Promise => { + const testFetch = async (url: RequestInfo, { signal }: RequestInit = {}): Promise => { if (count++ === 0) { return new Promise( (resolve, reject) => 
signal?.addEventListener('abort', () => reject(new Error('timed out'))), @@ -549,7 +248,7 @@ describe('retries', () => { return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); }; - const client = new LlamaStackClient({ apiKey: 'My API Key', timeout: 10, fetch: testFetch }); + const client = new LlamaStackClient({ timeout: 10, fetch: testFetch }); expect(await client.request({ path: '/foo', method: 'get' })).toEqual({ a: 1 }); expect(count).toEqual(2); @@ -565,7 +264,7 @@ describe('retries', () => { test('retry count header', async () => { let count = 0; let capturedRequest: RequestInit | undefined; - const testFetch = async (url: string | URL | Request, init: RequestInit = {}): Promise => { + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { count++; if (count <= 2) { return new Response(undefined, { @@ -579,18 +278,18 @@ describe('retries', () => { return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); }; - const client = new LlamaStackClient({ apiKey: 'My API Key', fetch: testFetch, maxRetries: 4 }); + const client = new LlamaStackClient({ fetch: testFetch, maxRetries: 4 }); expect(await client.request({ path: '/foo', method: 'get' })).toEqual({ a: 1 }); - expect((capturedRequest!.headers as Headers).get('x-stainless-retry-count')).toEqual('2'); + expect((capturedRequest!.headers as Headers)['x-stainless-retry-count']).toEqual('2'); expect(count).toEqual(3); }); test('omit retry count header', async () => { let count = 0; let capturedRequest: RequestInit | undefined; - const testFetch = async (url: string | URL | Request, init: RequestInit = {}): Promise => { + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { count++; if (count <= 2) { return new Response(undefined, { @@ -603,7 +302,7 @@ describe('retries', () => { capturedRequest = init; return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 
'application/json' } }); }; - const client = new LlamaStackClient({ apiKey: 'My API Key', fetch: testFetch, maxRetries: 4 }); + const client = new LlamaStackClient({ fetch: testFetch, maxRetries: 4 }); expect( await client.request({ @@ -613,13 +312,13 @@ describe('retries', () => { }), ).toEqual({ a: 1 }); - expect((capturedRequest!.headers as Headers).has('x-stainless-retry-count')).toBe(false); + expect(capturedRequest!.headers as Headers).not.toHaveProperty('x-stainless-retry-count'); }); test('omit retry count header by default', async () => { let count = 0; let capturedRequest: RequestInit | undefined; - const testFetch = async (url: string | URL | Request, init: RequestInit = {}): Promise => { + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { count++; if (count <= 2) { return new Response(undefined, { @@ -633,7 +332,6 @@ describe('retries', () => { return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); }; const client = new LlamaStackClient({ - apiKey: 'My API Key', fetch: testFetch, maxRetries: 4, defaultHeaders: { 'X-Stainless-Retry-Count': null }, @@ -652,7 +350,7 @@ describe('retries', () => { test('overwrite retry count header', async () => { let count = 0; let capturedRequest: RequestInit | undefined; - const testFetch = async (url: string | URL | Request, init: RequestInit = {}): Promise => { + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { count++; if (count <= 2) { return new Response(undefined, { @@ -665,7 +363,7 @@ describe('retries', () => { capturedRequest = init; return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); }; - const client = new LlamaStackClient({ apiKey: 'My API Key', fetch: testFetch, maxRetries: 4 }); + const client = new LlamaStackClient({ fetch: testFetch, maxRetries: 4 }); expect( await client.request({ @@ -675,15 +373,12 @@ describe('retries', () => { }), ).toEqual({ a: 1 
}); - expect((capturedRequest!.headers as Headers).get('x-stainless-retry-count')).toEqual('42'); + expect((capturedRequest!.headers as Headers)['x-stainless-retry-count']).toBe('42'); }); test('retry on 429 with retry-after', async () => { let count = 0; - const testFetch = async ( - url: string | URL | Request, - { signal }: RequestInit = {}, - ): Promise => { + const testFetch = async (url: RequestInfo, { signal }: RequestInit = {}): Promise => { if (count++ === 0) { return new Response(undefined, { status: 429, @@ -695,7 +390,7 @@ describe('retries', () => { return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); }; - const client = new LlamaStackClient({ apiKey: 'My API Key', fetch: testFetch }); + const client = new LlamaStackClient({ fetch: testFetch }); expect(await client.request({ path: '/foo', method: 'get' })).toEqual({ a: 1 }); expect(count).toEqual(2); @@ -710,10 +405,7 @@ describe('retries', () => { test('retry on 429 with retry-after-ms', async () => { let count = 0; - const testFetch = async ( - url: string | URL | Request, - { signal }: RequestInit = {}, - ): Promise => { + const testFetch = async (url: RequestInfo, { signal }: RequestInit = {}): Promise => { if (count++ === 0) { return new Response(undefined, { status: 429, @@ -725,7 +417,7 @@ describe('retries', () => { return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); }; - const client = new LlamaStackClient({ apiKey: 'My API Key', fetch: testFetch }); + const client = new LlamaStackClient({ fetch: testFetch }); expect(await client.request({ path: '/foo', method: 'get' })).toEqual({ a: 1 }); expect(count).toEqual(2); diff --git a/tests/internal/decoders/line.test.ts b/tests/internal/decoders/line.test.ts new file mode 100644 index 0000000..447429a --- /dev/null +++ b/tests/internal/decoders/line.test.ts @@ -0,0 +1,128 @@ +import { findDoubleNewlineIndex, LineDecoder } from 
'llama-stack-client/internal/decoders/line'; + +function decodeChunks(chunks: string[], { flush }: { flush: boolean } = { flush: false }): string[] { + const decoder = new LineDecoder(); + const lines: string[] = []; + for (const chunk of chunks) { + lines.push(...decoder.decode(chunk)); + } + + if (flush) { + lines.push(...decoder.flush()); + } + + return lines; +} + +describe('line decoder', () => { + test('basic', () => { + // baz is not included because the line hasn't ended yet + expect(decodeChunks(['foo', ' bar\nbaz'])).toEqual(['foo bar']); + }); + + test('basic with \\r', () => { + expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']); + expect(decodeChunks(['foo', ' bar\r\nbaz'], { flush: true })).toEqual(['foo bar', 'baz']); + }); + + test('trailing new lines', () => { + expect(decodeChunks(['foo', ' bar', 'baz\n', 'thing\n'])).toEqual(['foo barbaz', 'thing']); + }); + + test('trailing new lines with \\r', () => { + expect(decodeChunks(['foo', ' bar', 'baz\r\n', 'thing\r\n'])).toEqual(['foo barbaz', 'thing']); + }); + + test('escaped new lines', () => { + expect(decodeChunks(['foo', ' bar\\nbaz\n'])).toEqual(['foo bar\\nbaz']); + }); + + test('escaped new lines with \\r', () => { + expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']); + }); + + test('\\r & \\n split across multiple chunks', () => { + expect(decodeChunks(['foo\r', '\n', 'bar'], { flush: true })).toEqual(['foo', 'bar']); + }); + + test('single \\r', () => { + expect(decodeChunks(['foo\r', 'bar'], { flush: true })).toEqual(['foo', 'bar']); + }); + + test('double \\r', () => { + expect(decodeChunks(['foo\r', 'bar\r'], { flush: true })).toEqual(['foo', 'bar']); + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + // implementation detail that we don't yield the single \r line until a new \r or \n is encountered + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: false })).toEqual(['foo']); + }); + + 
test('double \\r then \\r\\n', () => { + expect(decodeChunks(['foo\r', '\r', '\r', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); + expect(decodeChunks(['foo\n', '\n', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); + }); + + test('double newline', () => { + expect(decodeChunks(['foo\n\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo', '\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo', '\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + }); + + test('multi-byte characters across chunks', () => { + const decoder = new LineDecoder(); + + // bytes taken from the string 'известни' and arbitrarily split + // so that some multi-byte characters span multiple chunks + expect(decoder.decode(new Uint8Array([0xd0]))).toHaveLength(0); + expect(decoder.decode(new Uint8Array([0xb8, 0xd0, 0xb7, 0xd0]))).toHaveLength(0); + expect( + decoder.decode(new Uint8Array([0xb2, 0xd0, 0xb5, 0xd1, 0x81, 0xd1, 0x82, 0xd0, 0xbd, 0xd0, 0xb8])), + ).toHaveLength(0); + + const decoded = decoder.decode(new Uint8Array([0xa])); + expect(decoded).toEqual(['известни']); + }); + + test('flushing trailing newlines', () => { + expect(decodeChunks(['foo\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + }); + + test('flushing empty buffer', () => { + expect(decodeChunks([], { flush: true })).toEqual([]); + }); +}); + +describe('findDoubleNewlineIndex', () => { + test('finds \\n\\n', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\n\nbar'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\n\nbar'))).toBe(2); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\n\n'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\n\n'))).toBe(2); + }); + + test('finds \\r\\r', () => { + expect(findDoubleNewlineIndex(new 
TextEncoder().encode('foo\r\rbar'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\rbar'))).toBe(2); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\r'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\r'))).toBe(2); + }); + + test('finds \\r\\n\\r\\n', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r\nbar'))).toBe(7); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\n\r\nbar'))).toBe(4); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r\n'))).toBe(7); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\n\r\n'))).toBe(4); + }); + + test('returns -1 when no double newline found', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\nbar'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\rbar'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\nbar'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode(''))).toBe(-1); + }); + + test('handles incomplete patterns', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n'))).toBe(-1); + }); +}); diff --git a/tests/path.test.ts b/tests/path.test.ts deleted file mode 100644 index c4e7516..0000000 --- a/tests/path.test.ts +++ /dev/null @@ -1,318 +0,0 @@ -import { createPathTagFunction, encodeURIPath } from 'llama-stack-client/internal/utils/path'; -import { inspect } from 'node:util'; - -describe('path template tag function', () => { - test('validates input', () => { - const testParams = ['', '.', '..', 'x', '%2e', '%2E', '%2e%2e', '%2E%2e', '%2e%2E', '%2E%2E']; - const testCases = [ - ['/path_params/', '/a'], - ['/path_params/', '/'], - ['/path_params/', ''], - ['', '/a'], - ['', '/'], - ['', ''], - ['a'], - [''], - ['/path_params/', ':initiate'], - ['/path_params/', '.json'], - ['/path_params/', '?beta=true'], - 
['/path_params/', '.?beta=true'], - ['/path_params/', '/', '/download'], - ['/path_params/', '-', '/download'], - ['/path_params/', '', '/download'], - ['/path_params/', '.', '/download'], - ['/path_params/', '..', '/download'], - ['/plain/path'], - ]; - - function paramPermutations(len: number): string[][] { - if (len === 0) return []; - if (len === 1) return testParams.map((e) => [e]); - const rest = paramPermutations(len - 1); - return testParams.flatMap((e) => rest.map((r) => [e, ...r])); - } - - // we need to test how %2E is handled so we use a custom encoder that does no escaping - const rawPath = createPathTagFunction((s) => s); - - const results: { - [pathParts: string]: { - [params: string]: { valid: boolean; result?: string; error?: string }; - }; - } = {}; - - for (const pathParts of testCases) { - const pathResults: Record = {}; - results[JSON.stringify(pathParts)] = pathResults; - for (const params of paramPermutations(pathParts.length - 1)) { - const stringRaw = String.raw({ raw: pathParts }, ...params); - const plainString = String.raw( - { raw: pathParts.map((e) => e.replace(/\./g, 'x')) }, - ...params.map((e) => 'X'.repeat(e.length)), - ); - const normalizedStringRaw = new URL(stringRaw, 'https://example.com').href; - const normalizedPlainString = new URL(plainString, 'https://example.com').href; - const pathResultsKey = JSON.stringify(params); - try { - const result = rawPath(pathParts, ...params); - expect(result).toBe(stringRaw); - // there are no special segments, so the length of the normalized path is - // equal to the length of the normalized plain path. - expect(normalizedStringRaw.length).toBe(normalizedPlainString.length); - pathResults[pathResultsKey] = { - valid: true, - result, - }; - } catch (e) { - const error = String(e); - expect(error).toMatch(/Path parameters result in path with invalid segment/); - // there are special segments, so the length of the normalized path is - // different than the length of the normalized plain path. 
- expect(normalizedStringRaw.length).not.toBe(normalizedPlainString.length); - pathResults[pathResultsKey] = { - valid: false, - error, - }; - } - } - } - - expect(results).toMatchObject({ - '["/path_params/","/a"]': { - '["x"]': { valid: true, result: '/path_params/x/a' }, - '[""]': { valid: true, result: '/path_params//a' }, - '["%2E%2e"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2E%2e/a\n' + - ' ^^^^^^', - }, - '["%2E"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2E/a\n' + - ' ^^^', - }, - }, - '["/path_params/","/"]': { - '["x"]': { valid: true, result: '/path_params/x/' }, - '[""]': { valid: true, result: '/path_params//' }, - '["%2e%2E"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2e%2E/\n' + - ' ^^^^^^', - }, - '["%2e"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2e/\n' + - ' ^^^', - }, - }, - '["/path_params/",""]': { - '[""]': { valid: true, result: '/path_params/' }, - '["x"]': { valid: true, result: '/path_params/x' }, - '["%2E"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2E\n' + - ' ^^^', - }, - '["%2E%2e"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2E%2e\n' + - ' ^^^^^^', - }, - }, - '["","/a"]': { - '[""]': { valid: true, result: '/a' }, - '["x"]': { valid: true, result: 'x/a' }, - '["%2E"]': { - valid: false, - error: 'Error: Path parameters result in path with invalid segments:\n%2E/a\n^^^', - }, - '["%2e%2E"]': { - valid: false, - error: 'Error: Path parameters result in path with invalid segments:\n' + '%2e%2E/a\n' + '^^^^^^', - }, - }, - '["","/"]': { - '["x"]': { valid: true, result: 'x/' }, - '[""]': { valid: true, result: 
'/' }, - '["%2E%2e"]': { - valid: false, - error: 'Error: Path parameters result in path with invalid segments:\n' + '%2E%2e/\n' + '^^^^^^', - }, - '["."]': { - valid: false, - error: 'Error: Path parameters result in path with invalid segments:\n./\n^', - }, - }, - '["",""]': { - '[""]': { valid: true, result: '' }, - '["x"]': { valid: true, result: 'x' }, - '[".."]': { - valid: false, - error: 'Error: Path parameters result in path with invalid segments:\n..\n^^', - }, - '["."]': { - valid: false, - error: 'Error: Path parameters result in path with invalid segments:\n.\n^', - }, - }, - '["a"]': {}, - '[""]': {}, - '["/path_params/",":initiate"]': { - '[""]': { valid: true, result: '/path_params/:initiate' }, - '["."]': { valid: true, result: '/path_params/.:initiate' }, - }, - '["/path_params/",".json"]': { - '["x"]': { valid: true, result: '/path_params/x.json' }, - '["."]': { valid: true, result: '/path_params/..json' }, - }, - '["/path_params/","?beta=true"]': { - '["x"]': { valid: true, result: '/path_params/x?beta=true' }, - '[""]': { valid: true, result: '/path_params/?beta=true' }, - '["%2E%2E"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2E%2E?beta=true\n' + - ' ^^^^^^', - }, - '["%2e%2E"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2e%2E?beta=true\n' + - ' ^^^^^^', - }, - }, - '["/path_params/",".?beta=true"]': { - '[".."]': { valid: true, result: '/path_params/...?beta=true' }, - '["x"]': { valid: true, result: '/path_params/x.?beta=true' }, - '[""]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/.?beta=true\n' + - ' ^', - }, - '["%2e"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2e.?beta=true\n' + - ' ^^^^', - }, - }, - '["/path_params/","/","/download"]': { - '["",""]': { 
valid: true, result: '/path_params///download' }, - '["","x"]': { valid: true, result: '/path_params//x/download' }, - '[".","%2e"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/./%2e/download\n' + - ' ^ ^^^', - }, - '["%2E%2e","%2e"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2E%2e/%2e/download\n' + - ' ^^^^^^ ^^^', - }, - }, - '["/path_params/","-","/download"]': { - '["","%2e"]': { valid: true, result: '/path_params/-%2e/download' }, - '["%2E",".."]': { valid: true, result: '/path_params/%2E-../download' }, - }, - '["/path_params/","","/download"]': { - '["%2E%2e","%2e%2E"]': { valid: true, result: '/path_params/%2E%2e%2e%2E/download' }, - '["%2E",".."]': { valid: true, result: '/path_params/%2E../download' }, - '["","%2E"]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2E/download\n' + - ' ^^^', - }, - '["%2E","."]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/%2E./download\n' + - ' ^^^^', - }, - }, - '["/path_params/",".","/download"]': { - '["%2e%2e",""]': { valid: true, result: '/path_params/%2e%2e./download' }, - '["","%2e%2e"]': { valid: true, result: '/path_params/.%2e%2e/download' }, - '["",""]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/./download\n' + - ' ^', - }, - '["","."]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - '/path_params/../download\n' + - ' ^^', - }, - }, - '["/path_params/","..","/download"]': { - '["","%2E"]': { valid: true, result: '/path_params/..%2E/download' }, - '["","x"]': { valid: true, result: '/path_params/..x/download' }, - '["",""]': { - valid: false, - error: - 'Error: Path parameters result in path with invalid segments:\n' + - 
'/path_params/../download\n' + - ' ^^', - }, - }, - }); - }); -}); - -describe('encodeURIPath', () => { - const testCases: string[] = [ - '', - // Every ASCII character - ...Array.from({ length: 0x7f }, (_, i) => String.fromCharCode(i)), - // Unicode BMP codepoint - 'å', - // Unicode supplementary codepoint - '😃', - ]; - - for (const param of testCases) { - test('properly encodes ' + inspect(param), () => { - const encoded = encodeURIPath(param); - const naiveEncoded = encodeURIComponent(param); - // we should never encode more characters than encodeURIComponent - expect(naiveEncoded.length).toBeGreaterThanOrEqual(encoded.length); - expect(decodeURIComponent(encoded)).toBe(param); - }); - } - - test("leaves ':' intact", () => { - expect(encodeURIPath(':')).toBe(':'); - }); - - test("leaves '@' intact", () => { - expect(encodeURIPath('@')).toBe('@'); - }); -}); diff --git a/tests/qs/utils.test.ts b/tests/qs/utils.test.ts index ae6ab6d..56114eb 100644 --- a/tests/qs/utils.test.ts +++ b/tests/qs/utils.test.ts @@ -66,7 +66,7 @@ describe('merge()', function () { // st.equal(getCount, 1); expect(setCount).toEqual(0); expect(getCount).toEqual(1); - observed[0] = observed[0]; + observed[0] = observed[0]; // eslint-disable-line no-self-assign // st.equal(setCount, 1); // st.equal(getCount, 2); expect(setCount).toEqual(1); diff --git a/tests/responses.test.ts b/tests/responses.test.ts new file mode 100644 index 0000000..2c2ce39 --- /dev/null +++ b/tests/responses.test.ts @@ -0,0 +1,25 @@ +import { createResponseHeaders } from 'llama-stack-client/core'; +import { Headers } from 'llama-stack-client/_shims/index'; + +describe('response parsing', () => { + // TODO: test unicode characters + test('headers are case agnostic', async () => { + const headers = createResponseHeaders(new Headers({ 'Content-Type': 'foo', Accept: 'text/plain' })); + expect(headers['content-type']).toEqual('foo'); + expect(headers['Content-type']).toEqual('foo'); + 
expect(headers['Content-Type']).toEqual('foo'); + expect(headers['accept']).toEqual('text/plain'); + expect(headers['Accept']).toEqual('text/plain'); + expect(headers['Hello-World']).toBeUndefined(); + }); + + test('duplicate headers are concatenated', () => { + const headers = createResponseHeaders( + new Headers([ + ['Content-Type', 'text/xml'], + ['Content-Type', 'application/json'], + ]), + ); + expect(headers['content-type']).toBe('text/xml, application/json'); + }); +}); diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts new file mode 100644 index 0000000..8034695 --- /dev/null +++ b/tests/streaming.test.ts @@ -0,0 +1,244 @@ +import { Response } from 'node-fetch'; +import { PassThrough } from 'stream'; +import assert from 'assert'; +import { _iterSSEMessages } from 'llama-stack-client/streaming'; + +describe('streaming decoding', () => { + test('basic', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('event: completion\n'); + yield Buffer.from('data: {"foo":true}\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(JSON.parse(event.value.data)).toEqual({ foo: true }); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('data without event', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('data: {"foo":true}\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toBeNull(); + expect(JSON.parse(event.value.data)).toEqual({ foo: true }); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('event without data', async () => { + async function* 
body(): AsyncGenerator { + yield Buffer.from('event: foo\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('foo'); + expect(event.value.data).toEqual(''); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('multiple events', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('event: foo\n'); + yield Buffer.from('\n'); + yield Buffer.from('event: ping\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('foo'); + expect(event.value.data).toEqual(''); + + event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('ping'); + expect(event.value.data).toEqual(''); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('multiple events with data', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('event: foo\n'); + yield Buffer.from('data: {"foo":true}\n'); + yield Buffer.from('\n'); + yield Buffer.from('event: ping\n'); + yield Buffer.from('data: {"bar":false}\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('foo'); + expect(JSON.parse(event.value.data)).toEqual({ foo: true }); + + event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('ping'); + expect(JSON.parse(event.value.data)).toEqual({ bar: false }); + + event = await stream.next(); + 
expect(event.done).toBeTruthy(); + }); + + test('multiple data lines with empty line', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('event: ping\n'); + yield Buffer.from('data: {\n'); + yield Buffer.from('data: "foo":\n'); + yield Buffer.from('data: \n'); + yield Buffer.from('data:\n'); + yield Buffer.from('data: true}\n'); + yield Buffer.from('\n\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('ping'); + expect(JSON.parse(event.value.data)).toEqual({ foo: true }); + expect(event.value.data).toEqual('{\n"foo":\n\n\ntrue}'); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('data json escaped double new line', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('event: ping\n'); + yield Buffer.from('data: {"foo": "my long\\n\\ncontent"}'); + yield Buffer.from('\n\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('ping'); + expect(JSON.parse(event.value.data)).toEqual({ foo: 'my long\n\ncontent' }); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('special new line characters', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('data: {"content": "culpa "}\n'); + yield Buffer.from('\n'); + yield Buffer.from('data: {"content": "'); + yield Buffer.from([0xe2, 0x80, 0xa8]); + yield Buffer.from('"}\n'); + yield Buffer.from('\n'); + yield Buffer.from('data: {"content": "foo"}\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + 
let event = await stream.next(); + assert(event.value); + expect(JSON.parse(event.value.data)).toEqual({ content: 'culpa ' }); + + event = await stream.next(); + assert(event.value); + expect(JSON.parse(event.value.data)).toEqual({ content: Buffer.from([0xe2, 0x80, 0xa8]).toString() }); + + event = await stream.next(); + assert(event.value); + expect(JSON.parse(event.value.data)).toEqual({ content: 'foo' }); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('multi-byte characters across chunks', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('event: completion\n'); + yield Buffer.from('data: {"content": "'); + // bytes taken from the string 'известни' and arbitrarily split + // so that some multi-byte characters span multiple chunks + yield Buffer.from([0xd0]); + yield Buffer.from([0xb8, 0xd0, 0xb7, 0xd0]); + yield Buffer.from([0xb2, 0xd0, 0xb5, 0xd1, 0x81, 0xd1, 0x82, 0xd0, 0xbd, 0xd0, 0xb8]); + yield Buffer.from('"}\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('completion'); + expect(JSON.parse(event.value.data)).toEqual({ content: 'известни' }); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); +}); + +async function iteratorToStream(iterator: AsyncGenerator): Promise { + const parts: unknown[] = []; + + for await (const chunk of iterator) { + parts.push(chunk); + } + + let index = 0; + + const stream = new PassThrough({ + read() { + const value = parts[index]; + if (value === undefined) { + stream.end(); + } else { + index += 1; + stream.write(value); + } + }, + }); + + return stream; +} diff --git a/tests/uploads.test.ts b/tests/uploads.test.ts index 0684474..97bcd8a 100644 --- a/tests/uploads.test.ts +++ b/tests/uploads.test.ts @@ -1,7 +1,6 @@ import fs 
from 'fs'; -import type { ResponseLike } from 'llama-stack-client/internal/to-file'; -import { toFile } from 'llama-stack-client/core/uploads'; -import { File } from 'node:buffer'; +import { toFile, type ResponseLike } from 'llama-stack-client/uploads'; +import { File } from 'llama-stack-client/_shims/index'; class MyClass { name: string = 'foo'; @@ -10,7 +9,7 @@ class MyClass { function mockResponse({ url, content }: { url: string; content?: Blob }): ResponseLike { return { url, - blob: async () => content || new Blob([]), + blob: async () => content as any, }; } @@ -63,45 +62,4 @@ describe('toFile', () => { expect(file.name).toEqual('input.jsonl'); expect(file.type).toBe('jsonl'); }); - - it('is assignable to File and Blob', async () => { - const input = new File(['foo'], 'input.jsonl', { type: 'jsonl' }); - const result = await toFile(input); - const file: File = result; - const blob: Blob = result; - void file, blob; - }); -}); - -describe('missing File error message', () => { - let prevGlobalFile: unknown; - let prevNodeFile: unknown; - beforeEach(() => { - // The file shim captures the global File object when it's first imported. - // Reset modules before each test so we can test the error thrown when it's undefined. 
- jest.resetModules(); - const buffer = require('node:buffer'); - // @ts-ignore - prevGlobalFile = globalThis.File; - prevNodeFile = buffer.File; - // @ts-ignore - globalThis.File = undefined; - buffer.File = undefined; - }); - afterEach(() => { - // Clean up - // @ts-ignore - globalThis.File = prevGlobalFile; - require('node:buffer').File = prevNodeFile; - jest.resetModules(); - }); - - test('is thrown', async () => { - const uploads = await import('llama-stack-client/core/uploads'); - await expect( - uploads.toFile(mockResponse({ url: 'https://example.com/my/audio.mp3' })), - ).rejects.toMatchInlineSnapshot( - `[Error: \`File\` is not defined as a global, which is required for file uploads.]`, - ); - }); }); diff --git a/tsc-multi.json b/tsc-multi.json index 384ddac..4facad5 100644 --- a/tsc-multi.json +++ b/tsc-multi.json @@ -1,15 +1,7 @@ { "targets": [ - { - "extname": ".js", - "module": "commonjs", - "shareHelpers": "internal/tslib.js" - }, - { - "extname": ".mjs", - "module": "esnext", - "shareHelpers": "internal/tslib.mjs" - } + { "extname": ".js", "module": "commonjs" }, + { "extname": ".mjs", "module": "esnext" } ], "projects": ["tsconfig.build.json"] } diff --git a/tsconfig.build.json b/tsconfig.build.json index 2101712..97ed0bc 100644 --- a/tsconfig.build.json +++ b/tsconfig.build.json @@ -1,12 +1,12 @@ { "extends": "./tsconfig.json", "include": ["dist/src"], - "exclude": [], + "exclude": ["dist/src/_shims/*-deno.ts"], "compilerOptions": { "rootDir": "./dist/src", "paths": { "llama-stack-client/*": ["dist/src/*"], - "llama-stack-client": ["dist/src/index.ts"] + "llama-stack-client": ["dist/src/index.ts"], }, "noEmit": false, "declaration": true, diff --git a/tsconfig.dist-src.json b/tsconfig.dist-src.json index c550e29..e9f2d70 100644 --- a/tsconfig.dist-src.json +++ b/tsconfig.dist-src.json @@ -4,8 +4,8 @@ // via declaration maps "include": ["index.ts"], "compilerOptions": { - "target": "ES2015", - "lib": ["DOM", "DOM.Iterable", "ES2018"], + "target": 
"es2015", + "lib": ["DOM"], "moduleResolution": "node" } } diff --git a/tsconfig.json b/tsconfig.json index 9c074b3..e8aa30d 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -1,6 +1,6 @@ { "include": ["src", "tests", "examples"], - "exclude": [], + "exclude": ["src/_shims/**/*-deno.ts"], "compilerOptions": { "target": "es2020", "lib": ["es2020"], @@ -9,6 +9,7 @@ "esModuleInterop": true, "baseUrl": "./", "paths": { + "llama-stack-client/_shims/auto/*": ["src/_shims/auto/*-node"], "llama-stack-client/*": ["src/*"], "llama-stack-client": ["src/index.ts"] }, @@ -31,7 +32,7 @@ "noUncheckedIndexedAccess": true, "noImplicitOverride": true, "noPropertyAccessFromIndexSignature": true, - "isolatedModules": false, + "isolatedModules": false, "skipLibCheck": true } diff --git a/yarn.lock b/yarn.lock index 58c08d5..bb17942 100644 --- a/yarn.lock +++ b/yarn.lock @@ -15,37 +15,6 @@ "@jridgewell/gen-mapping" "^0.3.0" "@jridgewell/trace-mapping" "^0.3.9" -"@andrewbranch/untar.js@^1.0.3": - version "1.0.3" - resolved "https://registry.yarnpkg.com/@andrewbranch/untar.js/-/untar.js-1.0.3.tgz#ba9494f85eb83017c5c855763969caf1d0adea00" - integrity sha512-Jh15/qVmrLGhkKJBdXlK1+9tY4lZruYjsgkDFj08ZmDiWVBLJcqkok7Z0/R0In+i1rScBpJlSvrTS2Lm41Pbnw== - -"@arethetypeswrong/cli@^0.17.0": - version "0.17.0" - resolved "https://registry.yarnpkg.com/@arethetypeswrong/cli/-/cli-0.17.0.tgz#f97f10926b3f9f9eb5117550242d2e06c25cadac" - integrity sha512-xSMW7bfzVWpYw5JFgZqBXqr6PdR0/REmn3DkxCES5N0JTcB0CVgbIynJCvKBFmXaPc3hzmmTrb7+yPDRoOSZdA== - dependencies: - "@arethetypeswrong/core" "0.17.0" - chalk "^4.1.2" - cli-table3 "^0.6.3" - commander "^10.0.1" - marked "^9.1.2" - marked-terminal "^7.1.0" - semver "^7.5.4" - -"@arethetypeswrong/core@0.17.0": - version "0.17.0" - resolved "https://registry.yarnpkg.com/@arethetypeswrong/core/-/core-0.17.0.tgz#abb3b5f425056d37193644c2a2de4aecf866b76b" - integrity sha512-FHyhFizXNetigTVsIhqXKGYLpazPS5YNojEPpZEUcBPt9wVvoEbNIvG+hybuBR+pjlRcbyuqhukHZm1fr+bDgA== - 
dependencies: - "@andrewbranch/untar.js" "^1.0.3" - cjs-module-lexer "^1.2.3" - fflate "^0.8.2" - lru-cache "^10.4.3" - semver "^7.5.4" - typescript "5.6.1-rc" - validate-npm-package-name "^5.0.0" - "@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.22.13", "@babel/code-frame@^7.23.5": version "7.23.5" resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.23.5.tgz#9009b69a8c602293476ad598ff53e4562e15c244" @@ -333,11 +302,6 @@ resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39" integrity sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw== -"@colors/colors@1.5.0": - version "1.5.0" - resolved "https://registry.yarnpkg.com/@colors/colors/-/colors-1.5.0.tgz#bb504579c1cae923e6576a4f5da43d25f97bdbd9" - integrity sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ== - "@cspotcode/source-map-consumer@0.8.0": version "0.8.0" resolved "https://registry.yarnpkg.com/@cspotcode/source-map-consumer/-/source-map-consumer-0.8.0.tgz#33bf4b7b39c178821606f669bbc447a6a629786b" @@ -357,94 +321,54 @@ dependencies: eslint-visitor-keys "^3.3.0" -"@eslint-community/regexpp@^4.10.0", "@eslint-community/regexpp@^4.12.1": - version "4.12.1" - resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.12.1.tgz#cfc6cffe39df390a3841cde2abccf92eaa7ae0e0" - integrity sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ== +"@eslint-community/regexpp@^4.5.1": + version "4.11.1" + resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.11.1.tgz#a547badfc719eb3e5f4b556325e542fbe9d7a18f" + integrity sha512-m4DVN9ZqskZoLU5GlWZadwDnYo3vAEydiUayB9widCl9ffWx2IvPnp6n3on5rJmziJSw9Bv+Z3ChDVdMwXCY8Q== -"@eslint/config-array@^0.19.0": - version "0.19.2" - resolved 
"https://registry.yarnpkg.com/@eslint/config-array/-/config-array-0.19.2.tgz#3060b809e111abfc97adb0bb1172778b90cb46aa" - integrity sha512-GNKqxfHG2ySmJOBSHg7LxeUx4xpuCoFjacmlCoYWEbaPXLwvfIjixRI12xCQZeULksQb23uiA8F40w5TojpV7w== - dependencies: - "@eslint/object-schema" "^2.1.6" - debug "^4.3.1" - minimatch "^3.1.2" - -"@eslint/core@^0.10.0": - version "0.10.0" - resolved "https://registry.yarnpkg.com/@eslint/core/-/core-0.10.0.tgz#23727063c21b335f752dbb3a16450f6f9cbc9091" - integrity sha512-gFHJ+xBOo4G3WRlR1e/3G8A6/KZAH6zcE/hkLRCZTi/B9avAG365QhFA8uOGzTMqgTghpn7/fSnscW++dpMSAw== - dependencies: - "@types/json-schema" "^7.0.15" - -"@eslint/core@^0.11.0": - version "0.11.0" - resolved "https://registry.yarnpkg.com/@eslint/core/-/core-0.11.0.tgz#7a9226e850922e42cbd2ba71361eacbe74352a12" - integrity sha512-DWUB2pksgNEb6Bz2fggIy1wh6fGgZP4Xyy/Mt0QZPiloKKXerbqq9D3SBQTlCRYOrcRPu4vuz+CGjwdfqxnoWA== - dependencies: - "@types/json-schema" "^7.0.15" +"@eslint-community/regexpp@^4.6.1": + version "4.6.2" + resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.6.2.tgz#1816b5f6948029c5eaacb0703b850ee0cb37d8f8" + integrity sha512-pPTNuaAG3QMH+buKyBIGJs3g/S5y0caxw0ygM3YyE6yJFySwiGGSzA+mM3KJ8QQvzeLh3blwgSonkFjgQdxzMw== -"@eslint/eslintrc@^3.2.0": - version "3.2.0" - resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-3.2.0.tgz#57470ac4e2e283a6bf76044d63281196e370542c" - integrity sha512-grOjVNN8P3hjJn/eIETF1wwd12DdnwFDoyceUJLYYdkpbwq3nLi+4fqrTAONx7XDALqlL220wC/RHSC/QTI/0w== +"@eslint/eslintrc@^2.1.2": + version "2.1.2" + resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-2.1.2.tgz#c6936b4b328c64496692f76944e755738be62396" + integrity sha512-+wvgpDsrB1YqAMdEUCcnTlpfVBH7Vqn6A/NT3D8WVXFIaKMlErPIZT3oCIAVCOtarRpMtelZLqJeU3t7WY6X6g== dependencies: ajv "^6.12.4" debug "^4.3.2" - espree "^10.0.1" - globals "^14.0.0" + espree "^9.6.0" + globals "^13.19.0" ignore "^5.2.0" import-fresh "^3.2.1" js-yaml "^4.1.0" minimatch "^3.1.2" 
strip-json-comments "^3.1.1" -"@eslint/js@9.20.0": - version "9.20.0" - resolved "https://registry.yarnpkg.com/@eslint/js/-/js-9.20.0.tgz#7421bcbe74889fcd65d1be59f00130c289856eb4" - integrity sha512-iZA07H9io9Wn836aVTytRaNqh00Sad+EamwOVJT12GTLw1VGMFV/4JaME+JjLtr9fiGaoWgYnS54wrfWsSs4oQ== +"@eslint/js@8.50.0": + version "8.50.0" + resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.50.0.tgz#9e93b850f0f3fa35f5fa59adfd03adae8488e484" + integrity sha512-NCC3zz2+nvYd+Ckfh87rA47zfu2QsQpvc6k1yzTk+b9KzRj0wkGa8LSoGOXN6Zv4lRf/EIoZ80biDh9HOI+RNQ== -"@eslint/object-schema@^2.1.6": - version "2.1.6" - resolved "https://registry.yarnpkg.com/@eslint/object-schema/-/object-schema-2.1.6.tgz#58369ab5b5b3ca117880c0f6c0b0f32f6950f24f" - integrity sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA== - -"@eslint/plugin-kit@^0.2.5": - version "0.2.5" - resolved "https://registry.yarnpkg.com/@eslint/plugin-kit/-/plugin-kit-0.2.5.tgz#ee07372035539e7847ef834e3f5e7b79f09e3a81" - integrity sha512-lB05FkqEdUg2AA0xEbUz0SnkXT1LcCTa438W4IWTUh4hdOnVbQyOJ81OrDXsJk/LSiJHubgGEFoR5EHq1NsH1A== +"@humanwhocodes/config-array@^0.11.11": + version "0.11.11" + resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.11.tgz#88a04c570dbbc7dd943e4712429c3df09bc32844" + integrity sha512-N2brEuAadi0CcdeMXUkhbZB84eskAc8MEX1By6qEchoVywSgXPIjou4rYsl0V3Hj0ZnuGycGCjdNgockbzeWNA== dependencies: - "@eslint/core" "^0.10.0" - levn "^0.4.1" - -"@humanfs/core@^0.19.1": - version "0.19.1" - resolved "https://registry.yarnpkg.com/@humanfs/core/-/core-0.19.1.tgz#17c55ca7d426733fe3c561906b8173c336b40a77" - integrity sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA== - -"@humanfs/node@^0.16.6": - version "0.16.6" - resolved "https://registry.yarnpkg.com/@humanfs/node/-/node-0.16.6.tgz#ee2a10eaabd1131987bf0488fd9b820174cd765e" - integrity 
sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw== - dependencies: - "@humanfs/core" "^0.19.1" - "@humanwhocodes/retry" "^0.3.0" + "@humanwhocodes/object-schema" "^1.2.1" + debug "^4.1.1" + minimatch "^3.0.5" "@humanwhocodes/module-importer@^1.0.1": version "1.0.1" resolved "https://registry.yarnpkg.com/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz#af5b2691a22b44be847b0ca81641c5fb6ad0172c" integrity sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA== -"@humanwhocodes/retry@^0.3.0": - version "0.3.1" - resolved "https://registry.yarnpkg.com/@humanwhocodes/retry/-/retry-0.3.1.tgz#c72a5c76a9fbaf3488e231b13dc52c0da7bab42a" - integrity sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA== - -"@humanwhocodes/retry@^0.4.1": - version "0.4.1" - resolved "https://registry.yarnpkg.com/@humanwhocodes/retry/-/retry-0.4.1.tgz#9a96ce501bc62df46c4031fbd970e3cc6b10f07b" - integrity sha512-c7hNEllBlenFTHBky65mhq8WD2kbN9Q6gk0bTk8lSBvc554jpXSkST1iePudpt7+A/AQvuHs9EMqjHDXMY1lrA== +"@humanwhocodes/object-schema@^1.2.1": + version "1.2.1" + resolved "https://registry.yarnpkg.com/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz#b520529ec21d8e5945a1851dfd1c32e94e39ff45" + integrity sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA== "@istanbuljs/load-nyc-config@^1.0.0": version "1.1.0" @@ -706,7 +630,7 @@ resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b" integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== -"@nodelib/fs.walk@^1.2.3": +"@nodelib/fs.walk@^1.2.3", "@nodelib/fs.walk@^1.2.8": version "1.2.8" resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a" integrity 
sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg== @@ -714,21 +638,23 @@ "@nodelib/fs.scandir" "2.1.5" fastq "^1.6.0" -"@pkgr/core@^0.2.4": - version "0.2.4" - resolved "https://registry.yarnpkg.com/@pkgr/core/-/core-0.2.4.tgz#d897170a2b0ba51f78a099edccd968f7b103387c" - integrity sha512-ROFF39F6ZrnzSUEmQQZUar0Jt4xVoP9WnDRdWwF4NNcXs3xBTLgBUDoOwW141y1jP+S8nahIbdxbFC7IShw9Iw== +"@pkgr/utils@^2.4.2": + version "2.4.2" + resolved "https://registry.yarnpkg.com/@pkgr/utils/-/utils-2.4.2.tgz#9e638bbe9a6a6f165580dc943f138fd3309a2cbc" + integrity sha512-POgTXhjrTfbTV63DiFXav4lBHiICLKKwDeaKn9Nphwj7WH6m0hMMCaJkMyRWjgtPFyRKRVoMXXjczsTQRDEhYw== + dependencies: + cross-spawn "^7.0.3" + fast-glob "^3.3.0" + is-glob "^4.0.3" + open "^9.1.0" + picocolors "^1.0.0" + tslib "^2.6.0" "@sinclair/typebox@^0.27.8": version "0.27.8" resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.27.8.tgz#6667fac16c436b5434a387a34dedb013198f6e6e" integrity sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA== -"@sindresorhus/is@^4.6.0": - version "4.6.0" - resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-4.6.0.tgz#3c7c9c46e678feefe7a2e5bb609d3dbd665ffb3f" - integrity sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw== - "@sinonjs/commons@^3.0.0": version "3.0.0" resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-3.0.0.tgz#beb434fe875d965265e04722ccfc21df7f755d72" @@ -886,11 +812,6 @@ dependencies: "@babel/types" "^7.20.7" -"@types/estree@^1.0.6": - version "1.0.6" - resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.6.tgz#628effeeae2064a1b4e79f78e81d87b7e5fc7b50" - integrity sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw== - "@types/graceful-fs@^4.1.3": version "4.1.9" resolved 
"https://registry.yarnpkg.com/@types/graceful-fs/-/graceful-fs-4.1.9.tgz#2a06bc0f68a20ab37b3e36aa238be6abdf49e8b4" @@ -925,11 +846,19 @@ expect "^29.0.0" pretty-format "^29.0.0" -"@types/json-schema@^7.0.15": +"@types/json-schema@^7.0.12": version "7.0.15" resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841" integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== +"@types/node-fetch@^2.6.4": + version "2.6.4" + resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.4.tgz#1bc3a26de814f6bf466b25aeb1473fa1afe6a660" + integrity sha512-1ZX9fcN4Rvkvgv4E6PAY5WXUFWFcRWxZa3EW83UjycOB9ljJCedb2CupIP4RZMEwF/M3eTcCihbBRgwtGbg5Rg== + dependencies: + "@types/node" "*" + form-data "^3.0.0" + "@types/node@*": version "20.10.5" resolved "https://registry.yarnpkg.com/@types/node/-/node-20.10.5.tgz#47ad460b514096b7ed63a1dae26fad0914ed3ab2" @@ -937,12 +866,15 @@ dependencies: undici-types "~5.26.4" -"@types/node@^20.17.6": - version "20.17.6" - resolved "https://registry.yarnpkg.com/@types/node/-/node-20.17.6.tgz#6e4073230c180d3579e8c60141f99efdf5df0081" - integrity sha512-VEI7OdvK2wP7XHnsuXbAJnEpEkF6NjSN45QJlL4VGqZSXsnicpesdTWsg9RISeSdYd3yeRj/y3k5KGjUXYnFwQ== - dependencies: - undici-types "~6.19.2" +"@types/node@^18.11.18": + version "18.11.18" + resolved "https://registry.yarnpkg.com/@types/node/-/node-18.11.18.tgz#8dfb97f0da23c2293e554c5a50d61ef134d7697f" + integrity sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA== + +"@types/semver@^7.5.0": + version "7.5.8" + resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.8.tgz#8268a8c57a3e4abd25c165ecd36237db7948a55e" + integrity sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ== "@types/stack-utils@^2.0.0": version "2.0.3" @@ -961,86 +893,98 @@ dependencies: "@types/yargs-parser" "*" 
-"@typescript-eslint/eslint-plugin@8.31.1": - version "8.31.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.31.1.tgz#62f1befe59647524994e89de4516d8dcba7a850a" - integrity sha512-oUlH4h1ABavI4F0Xnl8/fOtML/eu8nI2A1nYd+f+55XI0BLu+RIqKoCiZKNo6DtqZBEQm5aNKA20G3Z5w3R6GQ== +"@typescript-eslint/eslint-plugin@^6.7.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz#30830c1ca81fd5f3c2714e524c4303e0194f9cd3" + integrity sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA== dependencies: - "@eslint-community/regexpp" "^4.10.0" - "@typescript-eslint/scope-manager" "8.31.1" - "@typescript-eslint/type-utils" "8.31.1" - "@typescript-eslint/utils" "8.31.1" - "@typescript-eslint/visitor-keys" "8.31.1" + "@eslint-community/regexpp" "^4.5.1" + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/type-utils" "6.21.0" + "@typescript-eslint/utils" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" + debug "^4.3.4" graphemer "^1.4.0" - ignore "^5.3.1" + ignore "^5.2.4" natural-compare "^1.4.0" - ts-api-utils "^2.0.1" + semver "^7.5.4" + ts-api-utils "^1.0.1" -"@typescript-eslint/parser@8.31.1": - version "8.31.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-8.31.1.tgz#e9b0ccf30d37dde724ee4d15f4dbc195995cce1b" - integrity sha512-oU/OtYVydhXnumd0BobL9rkJg7wFJ9bFFPmSmB/bf/XWN85hlViji59ko6bSKBXyseT9V8l+CN1nwmlbiN0G7Q== +"@typescript-eslint/parser@^6.7.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.21.0.tgz#af8fcf66feee2edc86bc5d1cf45e33b0630bf35b" + integrity sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ== dependencies: - "@typescript-eslint/scope-manager" "8.31.1" - "@typescript-eslint/types" "8.31.1" - "@typescript-eslint/typescript-estree" "8.31.1" - "@typescript-eslint/visitor-keys" 
"8.31.1" + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/typescript-estree" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" debug "^4.3.4" -"@typescript-eslint/scope-manager@8.31.1": - version "8.31.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-8.31.1.tgz#1eb52e76878f545e4add142e0d8e3e97e7aa443b" - integrity sha512-BMNLOElPxrtNQMIsFHE+3P0Yf1z0dJqV9zLdDxN/xLlWMlXK/ApEsVEKzpizg9oal8bAT5Sc7+ocal7AC1HCVw== +"@typescript-eslint/scope-manager@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz#ea8a9bfc8f1504a6ac5d59a6df308d3a0630a2b1" + integrity sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg== dependencies: - "@typescript-eslint/types" "8.31.1" - "@typescript-eslint/visitor-keys" "8.31.1" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" -"@typescript-eslint/type-utils@8.31.1": - version "8.31.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-8.31.1.tgz#be0f438fb24b03568e282a0aed85f776409f970c" - integrity sha512-fNaT/m9n0+dpSp8G/iOQ05GoHYXbxw81x+yvr7TArTuZuCA6VVKbqWYVZrV5dVagpDTtj/O8k5HBEE/p/HM5LA== +"@typescript-eslint/type-utils@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz#6473281cfed4dacabe8004e8521cee0bd9d4c01e" + integrity sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag== dependencies: - "@typescript-eslint/typescript-estree" "8.31.1" - "@typescript-eslint/utils" "8.31.1" + "@typescript-eslint/typescript-estree" "6.21.0" + "@typescript-eslint/utils" "6.21.0" debug "^4.3.4" - ts-api-utils "^2.0.1" + ts-api-utils "^1.0.1" -"@typescript-eslint/types@8.31.1": - version "8.31.1" - resolved 
"https://registry.yarnpkg.com/@typescript-eslint/types/-/types-8.31.1.tgz#478ed6f7e8aee1be7b63a60212b6bffe1423b5d4" - integrity sha512-SfepaEFUDQYRoA70DD9GtytljBePSj17qPxFHA/h3eg6lPTqGJ5mWOtbXCk1YrVU1cTJRd14nhaXWFu0l2troQ== +"@typescript-eslint/types@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.21.0.tgz#205724c5123a8fef7ecd195075fa6e85bac3436d" + integrity sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg== -"@typescript-eslint/typescript-estree@8.31.1": - version "8.31.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-8.31.1.tgz#37792fe7ef4d3021c7580067c8f1ae66daabacdf" - integrity sha512-kaA0ueLe2v7KunYOyWYtlf/QhhZb7+qh4Yw6Ni5kgukMIG+iP773tjgBiLWIXYumWCwEq3nLW+TUywEp8uEeag== +"@typescript-eslint/typescript-estree@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz#c47ae7901db3b8bddc3ecd73daff2d0895688c46" + integrity sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ== dependencies: - "@typescript-eslint/types" "8.31.1" - "@typescript-eslint/visitor-keys" "8.31.1" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" debug "^4.3.4" - fast-glob "^3.3.2" + globby "^11.1.0" is-glob "^4.0.3" - minimatch "^9.0.4" - semver "^7.6.0" - ts-api-utils "^2.0.1" + minimatch "9.0.3" + semver "^7.5.4" + ts-api-utils "^1.0.1" -"@typescript-eslint/utils@8.31.1": - version "8.31.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-8.31.1.tgz#5628ea0393598a0b2f143d0fc6d019f0dee9dd14" - integrity sha512-2DSI4SNfF5T4oRveQ4nUrSjUqjMND0nLq9rEkz0gfGr3tg0S5KB6DhwR+WZPCjzkZl3cH+4x2ce3EsL50FubjQ== +"@typescript-eslint/utils@6.21.0": + version "6.21.0" + resolved 
"https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.21.0.tgz#4714e7a6b39e773c1c8e97ec587f520840cd8134" + integrity sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ== dependencies: "@eslint-community/eslint-utils" "^4.4.0" - "@typescript-eslint/scope-manager" "8.31.1" - "@typescript-eslint/types" "8.31.1" - "@typescript-eslint/typescript-estree" "8.31.1" + "@types/json-schema" "^7.0.12" + "@types/semver" "^7.5.0" + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/typescript-estree" "6.21.0" + semver "^7.5.4" -"@typescript-eslint/visitor-keys@8.31.1": - version "8.31.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-8.31.1.tgz#6742b0e3ba1e0c1e35bdaf78c03e759eb8dd8e75" - integrity sha512-I+/rgqOVBn6f0o7NDTmAPWWC6NuqhV174lfYvAm9fUaWeiefLdux9/YI3/nLugEn9L8fcSi0XmpKi/r5u0nmpw== +"@typescript-eslint/visitor-keys@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz#87a99d077aa507e20e238b11d56cc26ade45fe47" + integrity sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A== + dependencies: + "@typescript-eslint/types" "6.21.0" + eslint-visitor-keys "^3.4.1" + +abort-controller@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/abort-controller/-/abort-controller-3.0.0.tgz#eaf54d53b62bae4138e809ca225c8439a6efb392" + integrity sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg== dependencies: - "@typescript-eslint/types" "8.31.1" - eslint-visitor-keys "^4.2.0" + event-target-shim "^5.0.0" acorn-jsx@^5.3.2: version "5.3.2" @@ -1052,16 +996,25 @@ acorn-walk@^8.1.1: resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.2.0.tgz#741210f2e2426454508853a2f44d0ab83b7f69c1" integrity 
sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA== -acorn@^8.14.0: - version "8.14.0" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.14.0.tgz#063e2c70cac5fb4f6467f0b11152e04c682795b0" - integrity sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA== - acorn@^8.4.1: version "8.7.0" resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.7.0.tgz#90951fde0f8f09df93549481e5fc141445b791cf" integrity sha512-V/LGr1APy+PXIwKebEWrkZPwoeoF+w1jiOBUmuxuiUIaOHtob8Qc9BTrYo7VuI5fR8tqsy+buA2WFooR5olqvQ== +acorn@^8.9.0: + version "8.10.0" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.10.0.tgz#8be5b3907a67221a81ab23c7889c4c5526b62ec5" + integrity sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw== + +agentkeepalive@^4.2.1: + version "4.2.1" + resolved "https://registry.yarnpkg.com/agentkeepalive/-/agentkeepalive-4.2.1.tgz#a7975cbb9f83b367f06c90cc51ff28fe7d499717" + integrity sha512-Zn4cw2NEqd+9fiSVWMscnjyQ1a8Yfoc5oBajLeo5w+YBHgDUcEBY2hS4YpTz6iN5f/2zQiktcuM6tS8x1p9dpA== + dependencies: + debug "^4.1.0" + depd "^1.1.2" + humanize-ms "^1.2.1" + aggregate-error@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a" @@ -1087,23 +1040,11 @@ ansi-escapes@^4.2.1: dependencies: type-fest "^0.21.3" -ansi-escapes@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-7.0.0.tgz#00fc19f491bbb18e1d481b97868204f92109bfe7" - integrity sha512-GdYO7a61mR0fOlAsvC9/rIHf7L96sBc6dEWzeOu+KAea5bZyQRPIpojrVoI4AXGJS/ycu/fBTdLrUkA4ODrvjw== - dependencies: - environment "^1.0.0" - ansi-regex@^5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" integrity 
sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== -ansi-regex@^6.1.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654" - integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA== - ansi-styles@^3.2.1: version "3.2.1" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" @@ -1123,11 +1064,6 @@ ansi-styles@^5.0.0: resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b" integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA== -any-promise@^1.0.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f" - integrity sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A== - anymatch@^3.0.3: version "3.1.3" resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e" @@ -1153,6 +1089,16 @@ argparse@^2.0.1: resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== +array-union@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + integrity sha1-x57Zf380y48robyXkLzDZkdLS3k= + babel-jest@^29.7.0: version "29.7.0" resolved 
"https://registry.yarnpkg.com/babel-jest/-/babel-jest-29.7.0.tgz#f4369919225b684c56085998ac63dbd05be020d5" @@ -1218,6 +1164,18 @@ balanced-match@^1.0.0: resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== +big-integer@^1.6.44: + version "1.6.52" + resolved "https://registry.yarnpkg.com/big-integer/-/big-integer-1.6.52.tgz#60a887f3047614a8e1bffe5d7173490a97dc8c85" + integrity sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg== + +bplist-parser@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/bplist-parser/-/bplist-parser-0.2.0.tgz#43a9d183e5bf9d545200ceac3e712f79ebbe8d0e" + integrity sha512-z0M+byMThzQmD9NILRniCUXYsYpjwnlO8N5uCFaCqIOpqRsJCrQL9NK3JsD67CN5a08nF5oIL2bD6loTdHOuKw== + dependencies: + big-integer "^1.6.44" + brace-expansion@^1.1.7: version "1.1.11" resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" @@ -1269,6 +1227,13 @@ buffer-from@^1.0.0: resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== +bundle-name@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/bundle-name/-/bundle-name-3.0.0.tgz#ba59bcc9ac785fb67ccdbf104a2bf60c099f0e1a" + integrity sha512-PKA4BeSvBpQKQ8iPOGCSiell+N8P+Tf1DlwqmYhpe2gAhKPHn8EYOxVT+ShuGmhg8lN8XiSlS80yiExKXrURlw== + dependencies: + run-applescript "^5.0.0" + callsites@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" @@ -1298,7 +1263,7 @@ chalk@^2.4.2: escape-string-regexp "^1.0.5" supports-color "^5.3.0" -chalk@^4.0.0, chalk@^4.1.2: +chalk@^4.0.0: version 
"4.1.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== @@ -1306,11 +1271,6 @@ chalk@^4.0.0, chalk@^4.1.2: ansi-styles "^4.1.0" supports-color "^7.1.0" -chalk@^5.3.0: - version "5.3.0" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.3.0.tgz#67c20a7ebef70e7f3970a01f90fa210cb6860385" - integrity sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w== - char-regex@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/char-regex/-/char-regex-1.0.2.tgz#d744358226217f981ed58f479b1d6bcc29545dcf" @@ -1326,46 +1286,11 @@ cjs-module-lexer@^1.0.0: resolved "https://registry.yarnpkg.com/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz#6c370ab19f8a3394e318fe682686ec0ac684d107" integrity sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ== -cjs-module-lexer@^1.2.3: - version "1.4.1" - resolved "https://registry.yarnpkg.com/cjs-module-lexer/-/cjs-module-lexer-1.4.1.tgz#707413784dbb3a72aa11c2f2b042a0bef4004170" - integrity sha512-cuSVIHi9/9E/+821Qjdvngor+xpnlwnuwIyZOaLmHBVdXL+gP+I6QQB9VkO7RI77YIcTV+S1W9AreJ5eN63JBA== - clean-stack@^2.0.0: version "2.2.0" resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-2.2.0.tgz#ee8472dbb129e727b31e8a10a427dee9dfe4008b" integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A== -cli-highlight@^2.1.11: - version "2.1.11" - resolved "https://registry.yarnpkg.com/cli-highlight/-/cli-highlight-2.1.11.tgz#49736fa452f0aaf4fae580e30acb26828d2dc1bf" - integrity sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg== - dependencies: - chalk "^4.0.0" - highlight.js "^10.7.1" - mz "^2.4.0" - parse5 "^5.1.1" - parse5-htmlparser2-tree-adapter "^6.0.0" - yargs "^16.0.0" - -cli-table3@^0.6.3, 
cli-table3@^0.6.5: - version "0.6.5" - resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.5.tgz#013b91351762739c16a9567c21a04632e449bf2f" - integrity sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ== - dependencies: - string-width "^4.2.0" - optionalDependencies: - "@colors/colors" "1.5.0" - -cliui@^7.0.2: - version "7.0.4" - resolved "https://registry.yarnpkg.com/cliui/-/cliui-7.0.4.tgz#a0265ee655476fc807aea9df3df8df7783808b4f" - integrity sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ== - dependencies: - string-width "^4.2.0" - strip-ansi "^6.0.0" - wrap-ansi "^7.0.0" - cliui@^8.0.1: version "8.0.1" resolved "https://registry.yarnpkg.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa" @@ -1409,10 +1334,12 @@ color-name@~1.1.4: resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== -commander@^10.0.1: - version "10.0.1" - resolved "https://registry.yarnpkg.com/commander/-/commander-10.0.1.tgz#881ee46b4f77d1c1dccc5823433aa39b022cbe06" - integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug== +combined-stream@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" + integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== + dependencies: + delayed-stream "~1.0.0" concat-map@0.0.1: version "0.0.1" @@ -1442,7 +1369,7 @@ create-require@^1.1.0: resolved "https://registry.yarnpkg.com/create-require/-/create-require-1.1.1.tgz#c1d7e8f1e5f6cfc9ff65f9cd352d37348756c333" integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== -cross-spawn@^7.0.3, 
cross-spawn@^7.0.6: +cross-spawn@^7.0.2, cross-spawn@^7.0.3: version "7.0.6" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== @@ -1458,7 +1385,7 @@ debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2: dependencies: ms "2.1.2" -debug@^4.3.4, debug@^4.3.7: +debug@^4.3.4: version "4.3.7" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52" integrity sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ== @@ -1480,6 +1407,39 @@ deepmerge@^4.2.2: resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.3.1.tgz#44b5f2147cd3b00d4b56137685966f26fd25dd4a" integrity sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A== +default-browser-id@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/default-browser-id/-/default-browser-id-3.0.0.tgz#bee7bbbef1f4e75d31f98f4d3f1556a14cea790c" + integrity sha512-OZ1y3y0SqSICtE8DE4S8YOE9UZOJ8wO16fKWVP5J1Qz42kV9jcnMVFrEE/noXb/ss3Q4pZIH79kxofzyNNtUNA== + dependencies: + bplist-parser "^0.2.0" + untildify "^4.0.0" + +default-browser@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/default-browser/-/default-browser-4.0.0.tgz#53c9894f8810bf86696de117a6ce9085a3cbc7da" + integrity sha512-wX5pXO1+BrhMkSbROFsyxUm0i/cJEScyNhA4PPxc41ICuv05ZZB/MX28s8aZx6xjmatvebIapF6hLEKEcpneUA== + dependencies: + bundle-name "^3.0.0" + default-browser-id "^3.0.0" + execa "^7.1.1" + titleize "^3.0.0" + +define-lazy-prop@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz#dbb19adfb746d7fc6d734a06b72f4a00d021255f" + integrity sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg== + +delayed-stream@~1.0.0: + version "1.0.0" + 
resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + integrity sha1-3zrhmayt+31ECqrgsp4icrJOxhk= + +depd@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" + integrity sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak= + detect-newline@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-3.1.0.tgz#576f5dfc63ae1a192ff192d8ad3af6308991b651" @@ -1495,6 +1455,20 @@ diff@^4.0.1: resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== +dir-glob@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" + integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== + dependencies: + path-type "^4.0.0" + +doctrine@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-3.0.0.tgz#addebead72a6574db783639dc87a121773973961" + integrity sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w== + dependencies: + esutils "^2.0.2" + electron-to-chromium@^1.4.601: version "1.4.614" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.614.tgz#2fe789d61fa09cb875569f37c309d0c2701f91c0" @@ -1510,16 +1484,6 @@ emoji-regex@^8.0.0: resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== -emojilib@^2.4.0: - version "2.4.0" - resolved "https://registry.yarnpkg.com/emojilib/-/emojilib-2.4.0.tgz#ac518a8bb0d5f76dda57289ccb2fdf9d39ae721e" - integrity 
sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw== - -environment@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/environment/-/environment-1.1.0.tgz#8e86c66b180f363c7ab311787e0259665f45a9f1" - integrity sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q== - error-ex@^1.3.1: version "1.3.2" resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" @@ -1547,95 +1511,100 @@ escape-string-regexp@^4.0.0: resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== -eslint-plugin-prettier@^5.4.1: - version "5.4.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-5.4.1.tgz#99b55d7dd70047886b2222fdd853665f180b36af" - integrity sha512-9dF+KuU/Ilkq27A8idRP7N2DH8iUR6qXcjF3FR2wETY21PZdBrIjwCau8oboyGj9b7etWmTGEeM8e7oOed6ZWg== +eslint-plugin-prettier@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-5.0.1.tgz#a3b399f04378f79f066379f544e42d6b73f11515" + integrity sha512-m3u5RnR56asrwV/lDC4GHorlW75DsFfmUcjfCYylTUs85dBRnB7VM6xG8eCMJdeDRnppzmxZVf1GEPJvl1JmNg== dependencies: prettier-linter-helpers "^1.0.0" - synckit "^0.11.7" + synckit "^0.8.5" -eslint-plugin-unused-imports@^4.1.4: - version "4.1.4" - resolved "https://registry.yarnpkg.com/eslint-plugin-unused-imports/-/eslint-plugin-unused-imports-4.1.4.tgz#62ddc7446ccbf9aa7b6f1f0b00a980423cda2738" - integrity sha512-YptD6IzQjDardkl0POxnnRBhU1OEePMV0nd6siHaRBbd+lyh6NAhFEobiznKU7kTsSsDeSD62Pe7kAM1b7dAZQ== +eslint-plugin-unused-imports@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/eslint-plugin-unused-imports/-/eslint-plugin-unused-imports-3.0.0.tgz#d25175b0072ff16a91892c3aa72a09ca3a9e69e7" + integrity sha512-sduiswLJfZHeeBJ+MQaG+xYzSWdRXoSw61DpU13mzWumCkR0ufD0HmO4kdNokjrkluMHpj/7PJeN35pgbhW3kw== + dependencies: + eslint-rule-composer "^0.3.0" -eslint-scope@^8.2.0: - version "8.2.0" - resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-8.2.0.tgz#377aa6f1cb5dc7592cfd0b7f892fd0cf352ce442" - integrity sha512-PHlWUfG6lvPc3yvP5A4PNyBL1W8fkDUccmI21JUu/+GKZBoH/W5u6usENXUrWFRsyoW5ACUjFGgAFQp5gUlb/A== +eslint-rule-composer@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/eslint-rule-composer/-/eslint-rule-composer-0.3.0.tgz#79320c927b0c5c0d3d3d2b76c8b4a488f25bbaf9" + integrity sha512-bt+Sh8CtDmn2OajxvNO+BX7Wn4CIWMpTRm3MaiKPCQcnnlm0CS2mhui6QaoeQugs+3Kj2ESKEEGJUdVafwhiCg== + +eslint-scope@^7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-7.2.2.tgz#deb4f92563390f32006894af62a22dba1c46423f" + integrity sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg== dependencies: esrecurse "^4.3.0" estraverse "^5.2.0" -eslint-visitor-keys@^3.3.0: +eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4.3: version "3.4.3" resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800" integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== -eslint-visitor-keys@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz#687bacb2af884fcdda8a6e7d65c606f46a14cd45" - integrity sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw== - -eslint@^9.20.1: - version "9.20.1" - resolved "https://registry.yarnpkg.com/eslint/-/eslint-9.20.1.tgz#923924c078f5226832449bac86662dd7e53c91d6" - 
integrity sha512-m1mM33o6dBUjxl2qb6wv6nGNwCAsns1eKtaQ4l/NPHeTvhiUPbtdfMyktxN4B3fgHIgsYh1VT3V9txblpQHq+g== +eslint@^8.49.0: + version "8.50.0" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.50.0.tgz#2ae6015fee0240fcd3f83e1e25df0287f487d6b2" + integrity sha512-FOnOGSuFuFLv/Sa+FDVRZl4GGVAAFFi8LecRsI5a1tMO5HIE8nCm4ivAlzt4dT3ol/PaaGC0rJEEXQmHJBGoOg== dependencies: "@eslint-community/eslint-utils" "^4.2.0" - "@eslint-community/regexpp" "^4.12.1" - "@eslint/config-array" "^0.19.0" - "@eslint/core" "^0.11.0" - "@eslint/eslintrc" "^3.2.0" - "@eslint/js" "9.20.0" - "@eslint/plugin-kit" "^0.2.5" - "@humanfs/node" "^0.16.6" + "@eslint-community/regexpp" "^4.6.1" + "@eslint/eslintrc" "^2.1.2" + "@eslint/js" "8.50.0" + "@humanwhocodes/config-array" "^0.11.11" "@humanwhocodes/module-importer" "^1.0.1" - "@humanwhocodes/retry" "^0.4.1" - "@types/estree" "^1.0.6" - "@types/json-schema" "^7.0.15" + "@nodelib/fs.walk" "^1.2.8" ajv "^6.12.4" chalk "^4.0.0" - cross-spawn "^7.0.6" + cross-spawn "^7.0.2" debug "^4.3.2" + doctrine "^3.0.0" escape-string-regexp "^4.0.0" - eslint-scope "^8.2.0" - eslint-visitor-keys "^4.2.0" - espree "^10.3.0" - esquery "^1.5.0" + eslint-scope "^7.2.2" + eslint-visitor-keys "^3.4.3" + espree "^9.6.1" + esquery "^1.4.2" esutils "^2.0.2" fast-deep-equal "^3.1.3" - file-entry-cache "^8.0.0" + file-entry-cache "^6.0.1" find-up "^5.0.0" glob-parent "^6.0.2" + globals "^13.19.0" + graphemer "^1.4.0" ignore "^5.2.0" imurmurhash "^0.1.4" is-glob "^4.0.0" + is-path-inside "^3.0.3" + js-yaml "^4.1.0" json-stable-stringify-without-jsonify "^1.0.1" + levn "^0.4.1" lodash.merge "^4.6.2" minimatch "^3.1.2" natural-compare "^1.4.0" optionator "^0.9.3" + strip-ansi "^6.0.1" + text-table "^0.2.0" -espree@^10.0.1, espree@^10.3.0: - version "10.3.0" - resolved "https://registry.yarnpkg.com/espree/-/espree-10.3.0.tgz#29267cf5b0cb98735b65e64ba07e0ed49d1eed8a" - integrity sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg== 
+espree@^9.6.0, espree@^9.6.1: + version "9.6.1" + resolved "https://registry.yarnpkg.com/espree/-/espree-9.6.1.tgz#a2a17b8e434690a5432f2f8018ce71d331a48c6f" + integrity sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ== dependencies: - acorn "^8.14.0" + acorn "^8.9.0" acorn-jsx "^5.3.2" - eslint-visitor-keys "^4.2.0" + eslint-visitor-keys "^3.4.1" esprima@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== -esquery@^1.5.0: - version "1.6.0" - resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.6.0.tgz#91419234f804d852a82dceec3e16cdc22cf9dae7" - integrity sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg== +esquery@^1.4.2: + version "1.5.0" + resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.5.0.tgz#6ce17738de8577694edd7361c57182ac8cb0db0b" + integrity sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg== dependencies: estraverse "^5.1.0" @@ -1656,6 +1625,11 @@ esutils@^2.0.2: resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== +event-target-shim@^5.0.0: + version "5.0.1" + resolved "https://registry.yarnpkg.com/event-target-shim/-/event-target-shim-5.0.1.tgz#5d4d3ebdf9583d63a5333ce2deb7480ab2b05789" + integrity sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ== + execa@^5.0.0: version "5.1.1" resolved "https://registry.yarnpkg.com/execa/-/execa-5.1.1.tgz#f80ad9cbf4298f7bd1d4c9555c21e93741c411dd" @@ -1671,6 +1645,21 @@ execa@^5.0.0: signal-exit "^3.0.3" strip-final-newline "^2.0.0" +execa@^7.1.1: + version "7.2.0" + resolved 
"https://registry.yarnpkg.com/execa/-/execa-7.2.0.tgz#657e75ba984f42a70f38928cedc87d6f2d4fe4e9" + integrity sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA== + dependencies: + cross-spawn "^7.0.3" + get-stream "^6.0.1" + human-signals "^4.3.0" + is-stream "^3.0.0" + merge-stream "^2.0.0" + npm-run-path "^5.1.0" + onetime "^6.0.0" + signal-exit "^3.0.7" + strip-final-newline "^3.0.0" + exit@^0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c" @@ -1697,7 +1686,18 @@ fast-diff@^1.1.2: resolved "https://registry.yarnpkg.com/fast-diff/-/fast-diff-1.3.0.tgz#ece407fa550a64d638536cd727e129c61616e0f0" integrity sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw== -fast-glob@^3.3.2: +fast-glob@^3.2.12: + version "3.2.12" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.12.tgz#7f39ec99c2e6ab030337142da9e0c18f37afae80" + integrity sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.2" + merge2 "^1.3.0" + micromatch "^4.0.4" + +fast-glob@^3.2.9, fast-glob@^3.3.0: version "3.3.2" resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129" integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow== @@ -1732,17 +1732,12 @@ fb-watchman@^2.0.0: dependencies: bser "2.1.1" -fflate@^0.8.2: - version "0.8.2" - resolved "https://registry.yarnpkg.com/fflate/-/fflate-0.8.2.tgz#fc8631f5347812ad6028bbe4a2308b2792aa1dea" - integrity sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A== - -file-entry-cache@^8.0.0: - version "8.0.0" - resolved 
"https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-8.0.0.tgz#7787bddcf1131bffb92636c69457bbc0edd6d81f" - integrity sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ== +file-entry-cache@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-6.0.1.tgz#211b2dd9659cb0394b073e7323ac3c933d522027" + integrity sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg== dependencies: - flat-cache "^4.0.0" + flat-cache "^3.0.4" fill-range@^7.1.1: version "7.1.1" @@ -1767,18 +1762,40 @@ find-up@^5.0.0: locate-path "^6.0.0" path-exists "^4.0.0" -flat-cache@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-4.0.1.tgz#0ece39fcb14ee012f4b0410bd33dd9c1f011127c" - integrity sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw== +flat-cache@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-3.0.4.tgz#61b0338302b2fe9f957dcc32fc2a87f1c3048b11" + integrity sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg== dependencies: - flatted "^3.2.9" - keyv "^4.5.4" + flatted "^3.1.0" + rimraf "^3.0.2" -flatted@^3.2.9: - version "3.3.2" - resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.3.2.tgz#adba1448a9841bec72b42c532ea23dbbedef1a27" - integrity sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA== +flatted@^3.1.0: + version "3.2.7" + resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.2.7.tgz#609f39207cb614b89d0765b477cb2d437fbf9787" + integrity sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ== + +form-data-encoder@1.7.2: + version "1.7.2" + resolved "https://registry.yarnpkg.com/form-data-encoder/-/form-data-encoder-1.7.2.tgz#1f1ae3dccf58ed4690b86d87e4f57c654fbab040" + integrity 
sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A== + +form-data@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.1.tgz#ebd53791b78356a99af9a300d4282c4d5eb9755f" + integrity sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.8" + mime-types "^2.1.12" + +formdata-node@^4.3.2: + version "4.3.3" + resolved "https://registry.yarnpkg.com/formdata-node/-/formdata-node-4.3.3.tgz#21415225be66e2c87a917bfc0fedab30a119c23c" + integrity sha512-coTew7WODO2vF+XhpUdmYz4UBvlsiTMSNaFYZlrXIqYbFd4W7bMwnoALNLE6uvNgzTg2j1JDF0ZImEfF06VPAA== + dependencies: + node-domexception "1.0.0" + web-streams-polyfill "4.0.0-beta.1" fs.realpath@^1.0.0: version "1.0.0" @@ -1815,7 +1832,7 @@ get-stdin@^8.0.0: resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-8.0.0.tgz#cbad6a73feb75f6eeb22ba9e01f89aa28aa97a53" integrity sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg== -get-stream@^6.0.0: +get-stream@^6.0.0, get-stream@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== @@ -1846,26 +1863,29 @@ glob@^7.1.3, glob@^7.1.4: once "^1.3.0" path-is-absolute "^1.0.0" -glob@^8.0.1: - version "8.1.0" - resolved "https://registry.yarnpkg.com/glob/-/glob-8.1.0.tgz#d388f656593ef708ee3e34640fdfb99a9fd1c33e" - integrity sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ== - dependencies: - fs.realpath "^1.0.0" - inflight "^1.0.4" - inherits "2" - minimatch "^5.0.1" - once "^1.3.0" - globals@^11.1.0: version "11.12.0" resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" integrity 
sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== -globals@^14.0.0: - version "14.0.0" - resolved "https://registry.yarnpkg.com/globals/-/globals-14.0.0.tgz#898d7413c29babcf6bafe56fcadded858ada724e" - integrity sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ== +globals@^13.19.0: + version "13.20.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-13.20.0.tgz#ea276a1e508ffd4f1612888f9d1bad1e2717bf82" + integrity sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ== + dependencies: + type-fest "^0.20.2" + +globby@^11.1.0: + version "11.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b" + integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g== + dependencies: + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.2.9" + ignore "^5.2.0" + merge2 "^1.4.1" + slash "^3.0.0" graceful-fs@^4.2.9: version "4.2.11" @@ -1894,11 +1914,6 @@ hasown@^2.0.0: dependencies: function-bind "^1.1.2" -highlight.js@^10.7.1: - version "10.7.3" - resolved "https://registry.yarnpkg.com/highlight.js/-/highlight.js-10.7.3.tgz#697272e3991356e40c3cac566a74eef681756531" - integrity sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A== - html-escaper@^2.0.0: version "2.0.2" resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453" @@ -1909,6 +1924,18 @@ human-signals@^2.1.0: resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" integrity sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw== +human-signals@^4.3.0: + version "4.3.1" + resolved 
"https://registry.yarnpkg.com/human-signals/-/human-signals-4.3.1.tgz#ab7f811e851fca97ffbd2c1fe9a958964de321b2" + integrity sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ== + +humanize-ms@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/humanize-ms/-/humanize-ms-1.2.1.tgz#c46e3159a293f6b896da29316d8b6fe8bb79bbed" + integrity sha1-xG4xWaKT9riW2ikxbYtv6Lt5u+0= + dependencies: + ms "^2.0.0" + iconv-lite@^0.6.3: version "0.6.3" resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501" @@ -1916,14 +1943,7 @@ iconv-lite@^0.6.3: dependencies: safer-buffer ">= 2.1.2 < 3.0.0" -ignore-walk@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-5.0.1.tgz#5f199e23e1288f518d90358d461387788a154776" - integrity sha512-yemi4pMf51WKT7khInJqAvsIGzoqYXblnsz0ql8tM+yi1EKYTY1evX4NAbJrLL/Aanr2HyZeluqU+Oi7MGHokw== - dependencies: - minimatch "^5.0.1" - -ignore@^5.2.0, ignore@^5.3.1: +ignore@^5.2.0, ignore@^5.2.4: version "5.3.2" resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5" integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== @@ -1979,6 +1999,16 @@ is-core-module@^2.13.0: dependencies: hasown "^2.0.0" +is-docker@^2.0.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-2.2.1.tgz#33eeabe23cfe86f14bde4408a02c0cfb853acdaa" + integrity sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ== + +is-docker@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-3.0.0.tgz#90093aa3106277d8a77a5910dbae71747e15a200" + integrity sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ== + is-extglob@^2.1.1: version "2.1.1" resolved 
"https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" @@ -2001,16 +2031,40 @@ is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3: dependencies: is-extglob "^2.1.1" +is-inside-container@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-inside-container/-/is-inside-container-1.0.0.tgz#e81fba699662eb31dbdaf26766a61d4814717ea4" + integrity sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA== + dependencies: + is-docker "^3.0.0" + is-number@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== +is-path-inside@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-3.0.3.tgz#d231362e53a07ff2b0e0ea7fed049161ffd16283" + integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ== + is-stream@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077" integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg== +is-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-3.0.0.tgz#e6bfd7aa6bef69f4f472ce9bb681e3e57b4319ac" + integrity sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA== + +is-wsl@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271" + integrity sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww== + dependencies: + is-docker "^2.0.0" + isexe@^2.0.0: version "2.0.0" resolved 
"https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" @@ -2452,11 +2506,6 @@ jsesc@^2.5.1: resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== -json-buffer@3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13" - integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ== - json-parse-even-better-errors@^2.3.0: version "2.3.1" resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" @@ -2482,13 +2531,6 @@ jsonc-parser@^3.2.0: resolved "https://registry.yarnpkg.com/jsonc-parser/-/jsonc-parser-3.2.1.tgz#031904571ccf929d7670ee8c547545081cb37f1a" integrity sha512-AilxAyFOAcK5wA1+LeaySVBrHsGQvUFCDWXKpZjzaL0PqW+xfBOttn8GNtWKFWqneyMZj41MWF9Kl6iPWLwgOA== -keyv@^4.5.4: - version "4.5.4" - resolved "https://registry.yarnpkg.com/keyv/-/keyv-4.5.4.tgz#a879a99e29452f942439f2a405e3af8b31d4de93" - integrity sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw== - dependencies: - json-buffer "3.0.1" - kleur@^3.0.3: version "3.0.3" resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" @@ -2536,11 +2578,6 @@ lodash.merge@^4.6.2: resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a" integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== -lru-cache@^10.4.3: - version "10.4.3" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.4.3.tgz#410fc8a17b70e598013df257c2446b7f3383f119" - integrity 
sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ== - lru-cache@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" @@ -2574,30 +2611,12 @@ makeerror@1.0.12: dependencies: tmpl "1.0.5" -marked-terminal@^7.1.0: - version "7.2.1" - resolved "https://registry.yarnpkg.com/marked-terminal/-/marked-terminal-7.2.1.tgz#9c1ae073a245a03c6a13e3eeac6f586f29856068" - integrity sha512-rQ1MoMFXZICWNsKMiiHwP/Z+92PLKskTPXj+e7uwXmuMPkNn7iTqC+IvDekVm1MPeC9wYQeLxeFaOvudRR/XbQ== - dependencies: - ansi-escapes "^7.0.0" - ansi-regex "^6.1.0" - chalk "^5.3.0" - cli-highlight "^2.1.11" - cli-table3 "^0.6.5" - node-emoji "^2.1.3" - supports-hyperlinks "^3.1.0" - -marked@^9.1.2: - version "9.1.6" - resolved "https://registry.yarnpkg.com/marked/-/marked-9.1.6.tgz#5d2a3f8180abfbc5d62e3258a38a1c19c0381695" - integrity sha512-jcByLnIFkd5gSXZmjNvS1TlmRhCXZjIzHYlaGkPlLIekG55JDR2Z4va9tZwCiP+/RDERiNhMOFu01xd6O5ct1Q== - merge-stream@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== -merge2@^1.3.0: +merge2@^1.3.0, merge2@^1.4.1: version "1.4.1" resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== @@ -2610,75 +2629,73 @@ micromatch@^4.0.4: braces "^3.0.3" picomatch "^2.3.1" +mime-db@1.51.0: + version "1.51.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.51.0.tgz#d9ff62451859b18342d960850dc3cfb77e63fb0c" + integrity sha512-5y8A56jg7XVQx2mbv1lu49NR4dokRnhZYTtL+KGfaa27uq4pSTXkwQkFJl4pkRMyNFz/EtYDSkiiEHx3F7UN6g== + +mime-types@^2.1.12: + version "2.1.34" + resolved 
"https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.34.tgz#5a712f9ec1503511a945803640fafe09d3793c24" + integrity sha512-6cP692WwGIs9XXdOO4++N+7qjqv0rqxxVvJ3VHPh/Sc9mVZcQP+ZGhkKiTvWMQRr2tbHkJP/Yn7Y0npb3ZBs4A== + dependencies: + mime-db "1.51.0" + mimic-fn@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== -minimatch@^3.0.4, minimatch@^3.1.1, minimatch@^3.1.2: - version "3.1.2" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" - integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== - dependencies: - brace-expansion "^1.1.7" +mimic-fn@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-4.0.0.tgz#60a90550d5cb0b239cca65d893b1a53b29871ecc" + integrity sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw== -minimatch@^5.0.1: - version "5.1.6" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.1.6.tgz#1cfcb8cf5522ea69952cd2af95ae09477f122a96" - integrity sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g== +minimatch@9.0.3: + version "9.0.3" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.3.tgz#a6e00c3de44c3a542bfaae70abfc22420a6da825" + integrity sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg== dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.4: - version "9.0.5" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5" - integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== +minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2: + version 
"3.1.2" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" + integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== dependencies: - brace-expansion "^2.0.1" + brace-expansion "^1.1.7" minimist@^1.2.6: version "1.2.6" resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== -mri@^1.1.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/mri/-/mri-1.2.0.tgz#6721480fec2a11a4889861115a48b6cbe7cc8f0b" - integrity sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA== - ms@2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@^2.1.3: +ms@^2.0.0, ms@^2.1.3: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== -mz@^2.4.0: - version "2.7.0" - resolved "https://registry.yarnpkg.com/mz/-/mz-2.7.0.tgz#95008057a56cafadc2bc63dde7f9ff6955948e32" - integrity sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q== - dependencies: - any-promise "^1.0.0" - object-assign "^4.0.1" - thenify-all "^1.0.0" - natural-compare@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" integrity sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw== -node-emoji@^2.1.3: - version "2.1.3" - resolved 
"https://registry.yarnpkg.com/node-emoji/-/node-emoji-2.1.3.tgz#93cfabb5cc7c3653aa52f29d6ffb7927d8047c06" - integrity sha512-E2WEOVsgs7O16zsURJ/eH8BqhF029wGpEOnv7Urwdo2wmQanOACwJQh0devF9D9RhoZru0+9JXIS0dBXIAz+lA== +node-domexception@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/node-domexception/-/node-domexception-1.0.0.tgz#6888db46a1f71c0b76b3f7555016b63fe64766e5" + integrity sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ== + +node-fetch@^2.6.7: + version "2.6.11" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.11.tgz#cde7fc71deef3131ef80a738919f999e6edfff25" + integrity sha512-4I6pdBY1EthSqDmJkiNk3JIT8cswwR9nfeW/cPdUagJYEQG7R95WRH74wpz7ma8Gh/9dI9FP+OU+0E4FvtA55w== dependencies: - "@sindresorhus/is" "^4.6.0" - char-regex "^1.0.2" - emojilib "^2.4.0" - skin-tone "^2.0.0" + whatwg-url "^5.0.0" node-int64@^0.4.0: version "0.4.0" @@ -2695,28 +2712,6 @@ normalize-path@^3.0.0: resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== -npm-bundled@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-2.0.1.tgz#94113f7eb342cd7a67de1e789f896b04d2c600f4" - integrity sha512-gZLxXdjEzE/+mOstGDqR6b0EkhJ+kM6fxM6vUuckuctuVPh80Q6pw/rSZj9s4Gex9GxWtIicO1pc8DB9KZWudw== - dependencies: - npm-normalize-package-bin "^2.0.0" - -npm-normalize-package-bin@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/npm-normalize-package-bin/-/npm-normalize-package-bin-2.0.0.tgz#9447a1adaaf89d8ad0abe24c6c84ad614a675fff" - integrity sha512-awzfKUO7v0FscrSpRoogyNm0sajikhBWpU0QMrW09AMi9n1PoKU6WaIqUzuJSQnpciZZmJ/jMZ2Egfmb/9LiWQ== - -npm-packlist@^5.1.3: - version "5.1.3" - resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-5.1.3.tgz#69d253e6fd664b9058b85005905012e00e69274b" - 
integrity sha512-263/0NGrn32YFYi4J533qzrQ/krmmrWwhKkzwTuM4f/07ug51odoaNjUexxO4vxlzURHcmYMH1QjvHjsNDKLVg== - dependencies: - glob "^8.0.1" - ignore-walk "^5.0.1" - npm-bundled "^2.0.0" - npm-normalize-package-bin "^2.0.0" - npm-run-path@^4.0.1: version "4.0.1" resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" @@ -2724,10 +2719,12 @@ npm-run-path@^4.0.1: dependencies: path-key "^3.0.0" -object-assign@^4.0.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" - integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== +npm-run-path@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-5.1.0.tgz#bc62f7f3f6952d9894bd08944ba011a6ee7b7e00" + integrity sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q== + dependencies: + path-key "^4.0.0" once@^1.3.0: version "1.4.0" @@ -2743,6 +2740,23 @@ onetime@^5.1.2: dependencies: mimic-fn "^2.1.0" +onetime@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-6.0.0.tgz#7c24c18ed1fd2e9bca4bd26806a33613c77d34b4" + integrity sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ== + dependencies: + mimic-fn "^4.0.0" + +open@^9.1.0: + version "9.1.0" + resolved "https://registry.yarnpkg.com/open/-/open-9.1.0.tgz#684934359c90ad25742f5a26151970ff8c6c80b6" + integrity sha512-OS+QTnw1/4vrf+9hh1jc1jnYjzSG4ttTBB8UxOwAnInG3Uo4ssetzC1ihqaIHjLJnA5GGlRl6QlZXOTQhRBUvg== + dependencies: + default-browser "^4.0.0" + define-lazy-prop "^3.0.0" + is-inside-container "^1.0.0" + is-wsl "^2.2.0" + optionator@^0.9.3: version "0.9.3" resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.3.tgz#007397d44ed1872fdc6ed31360190f81814e2c64" @@ -2819,23 +2833,6 @@ parse-json@^5.2.0: 
json-parse-even-better-errors "^2.3.0" lines-and-columns "^1.1.6" -parse5-htmlparser2-tree-adapter@^6.0.0: - version "6.0.1" - resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz#2cdf9ad823321140370d4dbf5d3e92c7c8ddc6e6" - integrity sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA== - dependencies: - parse5 "^6.0.1" - -parse5@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.1.tgz#f68e4e5ba1852ac2cadc00f4555fff6c2abb6178" - integrity sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug== - -parse5@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" - integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== - path-exists@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" @@ -2851,21 +2848,26 @@ path-key@^3.0.0, path-key@^3.1.0: resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== +path-key@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-4.0.0.tgz#295588dc3aee64154f877adb9d780b81c554bf18" + integrity sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ== + path-parse@^1.0.7: version "1.0.7" resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== +path-type@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== + picocolors@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== -picocolors@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" - integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== - picomatch@^2.0.4, picomatch@^2.2.3, picomatch@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" @@ -2917,15 +2919,6 @@ prompts@^2.0.1: kleur "^3.0.3" sisteransi "^1.0.5" -publint@^0.2.12: - version "0.2.12" - resolved "https://registry.yarnpkg.com/publint/-/publint-0.2.12.tgz#d25cd6bd243d5bdd640344ecdddb3eeafdcc4059" - integrity sha512-YNeUtCVeM4j9nDiTT2OPczmlyzOkIXNtdDZnSuajAxS/nZ6j3t7Vs9SUB4euQNddiltIwu7Tdd3s+hr08fAsMw== - dependencies: - npm-packlist "^5.1.3" - picocolors "^1.1.1" - sade "^1.8.1" - punycode@^2.1.0: version "2.3.0" resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.0.tgz#f67fa67c94da8f4d0cfff981aee4118064199b8f" @@ -2996,6 +2989,20 @@ reusify@^1.0.4: resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== +rimraf@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" + integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== + dependencies: + 
glob "^7.1.3" + +run-applescript@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/run-applescript/-/run-applescript-5.0.0.tgz#e11e1c932e055d5c6b40d98374e0268d9b11899c" + integrity sha512-XcT5rBksx1QdIhlFOCtgZkB99ZEouFZ1E2Kc2LHqNW13U3/74YGdkQRmThTwxy4QIyookibDKYZOPqX//6BlAg== + dependencies: + execa "^5.0.0" + run-parallel@^1.1.9: version "1.2.0" resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" @@ -3003,13 +3010,6 @@ run-parallel@^1.1.9: dependencies: queue-microtask "^1.2.2" -sade@^1.8.1: - version "1.8.1" - resolved "https://registry.yarnpkg.com/sade/-/sade-1.8.1.tgz#0a78e81d658d394887be57d2a409bf703a3b2701" - integrity sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A== - dependencies: - mri "^1.1.0" - safe-buffer@~5.2.0: version "5.2.1" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" @@ -3037,11 +3037,6 @@ semver@^7.5.4: resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== -semver@^7.6.0: - version "7.7.1" - resolved "https://registry.yarnpkg.com/semver/-/semver-7.7.1.tgz#abd5098d82b18c6c81f6074ff2647fd3e7220c9f" - integrity sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA== - shebang-command@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" @@ -3064,13 +3059,6 @@ sisteransi@^1.0.5: resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg== -skin-tone@^2.0.0: - version "2.0.0" - resolved 
"https://registry.yarnpkg.com/skin-tone/-/skin-tone-2.0.0.tgz#4e3933ab45c0d4f4f781745d64b9f4c208e41237" - integrity sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA== - dependencies: - unicode-emoji-modifier-base "^1.0.0" - slash@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" @@ -3154,15 +3142,20 @@ strip-final-newline@^2.0.0: resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== +strip-final-newline@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-3.0.0.tgz#52894c313fbff318835280aed60ff71ebf12b8fd" + integrity sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw== + strip-json-comments@^3.1.1: version "3.1.1" resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== -superstruct@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/superstruct/-/superstruct-1.0.4.tgz#0adb99a7578bd2f1c526220da6571b2d485d91ca" - integrity sha512-7JpaAoX2NGyoFlI9NBh66BQXGONc+uE+MRS5i2iOBKuS4e+ccgMDjATgZldkah+33DakBxDHiss9kvUcGAO8UQ== +superstruct@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/superstruct/-/superstruct-1.0.3.tgz#de626a5b49c6641ff4d37da3c7598e7a87697046" + integrity sha512-8iTn3oSS8nRGn+C2pgXSKPI3jmpm6FExNazNpjvqS6ZUJQCej3PUXEKM8NjHBOs54ExM+LPW/FBRhymrdcCiSg== supports-color@^5.3.0: version "5.5.0" @@ -3171,7 +3164,7 @@ supports-color@^5.3.0: dependencies: has-flag "^3.0.0" -supports-color@^7.0.0, supports-color@^7.1.0: +supports-color@^7.1.0: version 
"7.2.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== @@ -3185,25 +3178,18 @@ supports-color@^8.0.0: dependencies: has-flag "^4.0.0" -supports-hyperlinks@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-3.1.0.tgz#b56150ff0173baacc15f21956450b61f2b18d3ac" - integrity sha512-2rn0BZ+/f7puLOHZm1HOJfwBggfaHXUpPUSSG/SWM4TWp5KCfmNYwnC3hruy2rZlMnmWZ+QAGpZfchu3f3695A== - dependencies: - has-flag "^4.0.0" - supports-color "^7.0.0" - supports-preserve-symlinks-flag@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== -synckit@^0.11.7: - version "0.11.8" - resolved "https://registry.yarnpkg.com/synckit/-/synckit-0.11.8.tgz#b2aaae998a4ef47ded60773ad06e7cb821f55457" - integrity sha512-+XZ+r1XGIJGeQk3VvXhT6xx/VpbHsRzsTkGgF6E5RX9TTXD0118l87puaEBZ566FhqblC6U0d4XnubznJDm30A== +synckit@^0.8.5: + version "0.8.6" + resolved "https://registry.yarnpkg.com/synckit/-/synckit-0.8.6.tgz#b69b7fbce3917c2673cbdc0d87fb324db4a5b409" + integrity sha512-laHF2savN6sMeHCjLRkheIU4wo3Zg9Ln5YOjOo7sZ5dVQW8yF5pPE5SIw1dsPhq3TRp1jisKRCdPhfs/1WMqDA== dependencies: - "@pkgr/core" "^0.2.4" + "@pkgr/utils" "^2.4.2" + tslib "^2.6.2" test-exclude@^6.0.0: version "6.0.0" @@ -3214,19 +3200,15 @@ test-exclude@^6.0.0: glob "^7.1.4" minimatch "^3.0.4" -thenify-all@^1.0.0: - version "1.6.0" - resolved "https://registry.yarnpkg.com/thenify-all/-/thenify-all-1.6.0.tgz#1a1918d402d8fc3f98fbf234db0bcc8cc10e9726" - integrity sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA== - dependencies: - thenify ">= 3.1.0 < 
4" +text-table@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw== -"thenify@>= 3.1.0 < 4": - version "3.3.1" - resolved "https://registry.yarnpkg.com/thenify/-/thenify-3.3.1.tgz#8932e686a4066038a016dd9e2ca46add9838a95f" - integrity sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw== - dependencies: - any-promise "^1.0.0" +titleize@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/titleize/-/titleize-3.0.0.tgz#71c12eb7fdd2558aa8a44b0be83b8a76694acd53" + integrity sha512-KxVu8EYHDPBdUYdKZdKtU2aj2XfEx9AfjXxE/Aj0vT06w2icA09Vus1rh6eSu1y01akYg6BjIK/hxyLJINoMLQ== tmpl@1.0.5: version "1.0.5" @@ -3245,10 +3227,15 @@ to-regex-range@^5.0.1: dependencies: is-number "^7.0.0" -ts-api-utils@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-2.0.1.tgz#660729385b625b939aaa58054f45c058f33f10cd" - integrity sha512-dnlgjFSVetynI8nzgJ+qF62efpglpWRk8isUEWZGWlJYySCTD6aKvbUDu+zbPeDakk3bg5H4XpitHukgfL1m9w== +tr46@~0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" + integrity sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o= + +ts-api-utils@^1.0.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-1.3.0.tgz#4b490e27129f1e8e686b45cc4ab63714dc60eea1" + integrity sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ== ts-jest@^29.1.0: version "29.1.1" @@ -3283,20 +3270,21 @@ ts-node@^10.5.0: v8-compile-cache-lib "^3.0.0" yn "3.1.1" -"tsc-multi@https://github.com/stainless-api/tsc-multi/releases/download/v1.1.8/tsc-multi.tgz": - version "1.1.8" - resolved 
"https://github.com/stainless-api/tsc-multi/releases/download/v1.1.8/tsc-multi.tgz#f544b359b8f05e607771ffacc280e58201476b04" +tsc-multi@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/tsc-multi/-/tsc-multi-1.1.0.tgz#0e2b03c0ed0ac58ecb556f11709441102d202680" + integrity sha512-THE6X+sse7EZ2qMhqXvBhd2HMTvXyWwYnx+2T/ijqdp/6Rf7rUc2uPRzPdrrljZCNcYDeL0qP2P7tqm2IwayTg== dependencies: - debug "^4.3.7" - fast-glob "^3.3.2" + debug "^4.3.4" + fast-glob "^3.2.12" get-stdin "^8.0.0" p-all "^3.0.0" - picocolors "^1.1.1" + picocolors "^1.0.0" signal-exit "^3.0.7" string-to-stream "^3.0.1" - superstruct "^1.0.4" - tslib "^2.8.1" - yargs "^17.7.2" + superstruct "^1.0.3" + tslib "^2.5.0" + yargs "^17.7.1" tsconfig-paths@^4.0.0: version "4.2.0" @@ -3307,10 +3295,15 @@ tsconfig-paths@^4.0.0: minimist "^1.2.6" strip-bom "^3.0.0" -tslib@^2.8.1: - version "2.8.1" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f" - integrity sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== +tslib@^2.5.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.0.tgz#b295854684dbda164e181d259a22cd779dcd7bc3" + integrity sha512-7At1WUettjcSRHXCyYtTselblcHl9PJFFVKiCAy/bY97+BPZXSQ2wbq0P9s8tK2G7dFQfNnlJnPAiArVBVBsfA== + +tslib@^2.6.0, tslib@^2.6.2: + version "2.6.2" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" + integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== type-check@^0.4.0, type-check@~0.4.0: version "0.4.0" @@ -3324,44 +3317,30 @@ type-detect@4.0.8: resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== +type-fest@^0.20.2: + version "0.20.2" + resolved 
"https://registry.yarnpkg.com/type-fest/-/type-fest-0.20.2.tgz#1bf207f4b28f91583666cb5fbd327887301cd5f4" + integrity sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ== + type-fest@^0.21.3: version "0.21.3" resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37" integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== -typescript-eslint@8.31.1: - version "8.31.1" - resolved "https://registry.yarnpkg.com/typescript-eslint/-/typescript-eslint-8.31.1.tgz#b77ab1e48ced2daab9225ff94bab54391a4af69b" - integrity sha512-j6DsEotD/fH39qKzXTQRwYYWlt7D+0HmfpOK+DVhwJOFLcdmn92hq3mBb7HlKJHbjjI/gTOqEcc9d6JfpFf/VA== - dependencies: - "@typescript-eslint/eslint-plugin" "8.31.1" - "@typescript-eslint/parser" "8.31.1" - "@typescript-eslint/utils" "8.31.1" - -typescript@5.6.1-rc: - version "5.6.1-rc" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.6.1-rc.tgz#d5e4d7d8170174fed607b74cc32aba3d77018e02" - integrity sha512-E3b2+1zEFu84jB0YQi9BORDjz9+jGbwwy1Zi3G0LUNw7a7cePUrHMRNy8aPh53nXpkFGVHSxIZo5vKTfYaFiBQ== - -typescript@5.8.3: - version "5.8.3" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.8.3.tgz#92f8a3e5e3cf497356f4178c34cd65a7f5e8440e" - integrity sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ== +typescript@^4.8.2: + version "4.9.5" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.9.5.tgz#095979f9bcc0d09da324d58d03ce8f8374cbe65a" + integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g== undici-types@~5.26.4: version "5.26.5" resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617" integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA== -undici-types@~6.19.2: - version 
"6.19.8" - resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.19.8.tgz#35111c9d1437ab83a7cdc0abae2f26d88eda0a02" - integrity sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw== - -unicode-emoji-modifier-base@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz#dbbd5b54ba30f287e2a8d5a249da6c0cef369459" - integrity sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g== +untildify@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/untildify/-/untildify-4.0.0.tgz#2bc947b953652487e4600949fb091e3ae8cd919b" + integrity sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw== update-browserslist-db@^1.0.13: version "1.0.13" @@ -3397,11 +3376,6 @@ v8-to-istanbul@^9.0.1: "@types/istanbul-lib-coverage" "^2.0.1" convert-source-map "^2.0.0" -validate-npm-package-name@^5.0.0: - version "5.0.1" - resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-5.0.1.tgz#a316573e9b49f3ccd90dbb6eb52b3f06c6d604e8" - integrity sha512-OljLrQ9SQdOUqTaQxqL5dEfZWrXExyyWsozYlAWFawPVNuD83igl7uJD2RTkNMbniIYgt8l81eCJGIdQF7avLQ== - walker@^1.0.8: version "1.0.8" resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f" @@ -3409,6 +3383,24 @@ walker@^1.0.8: dependencies: makeerror "1.0.12" +web-streams-polyfill@4.0.0-beta.1: + version "4.0.0-beta.1" + resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.1.tgz#3b19b9817374b7cee06d374ba7eeb3aeb80e8c95" + integrity sha512-3ux37gEX670UUphBF9AMCq8XM6iQ8Ac6A+DSRRjDoRBm1ufCkaCDdNVbaqq60PsEkdNlLKrGtv/YBP4EJXqNtQ== + +webidl-conversions@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" + 
integrity sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE= + +whatwg-url@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" + integrity sha1-lmRU6HZUYuN2RNNib2dCzotwll0= + dependencies: + tr46 "~0.0.3" + webidl-conversions "^3.0.0" + which@^2.0.1: version "2.0.2" resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" @@ -3453,30 +3445,12 @@ yallist@^4.0.0: resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== -yargs-parser@^20.2.2: - version "20.2.9" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee" - integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== - yargs-parser@^21.0.1, yargs-parser@^21.1.1: version "21.1.1" resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== -yargs@^16.0.0: - version "16.2.0" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" - integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw== - dependencies: - cliui "^7.0.2" - escalade "^3.1.1" - get-caller-file "^2.0.5" - require-directory "^2.1.1" - string-width "^4.2.0" - y18n "^5.0.5" - yargs-parser "^20.2.2" - -yargs@^17.3.1, yargs@^17.7.2: +yargs@^17.3.1, yargs@^17.7.1: version "17.7.2" resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==