diff --git a/.stats.yml b/.stats.yml
index fcbfe481..699660ea 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 2
+configured_endpoints: 8
diff --git a/README.md b/README.md
index 8aaeb5a4..f54782b1 100644
--- a/README.md
+++ b/README.md
@@ -27,9 +27,29 @@ const openlayer = new Openlayer({
});
async function main() {
- const projectCreateResponse = await openlayer.projects.create({ name: 'My Project', taskType: 'llm-base' });
-
- console.log(projectCreateResponse.id);
+ const dataStreamResponse = await openlayer.inferencePipelines.data.stream(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ {
+ config: {
+ inputVariableNames: ['user_query'],
+ outputColumnName: 'output',
+ numOfTokenColumnName: 'tokens',
+ costColumnName: 'cost',
+ timestampColumnName: 'timestamp',
+ },
+ rows: [
+ {
+ user_query: "what's the meaning of life?",
+ output: '42',
+ tokens: 7,
+ cost: 0.02,
+ timestamp: 1620000000,
+ },
+ ],
+ },
+ );
+
+ console.log(dataStreamResponse.success);
}
main();
@@ -48,8 +68,26 @@ const openlayer = new Openlayer({
});
async function main() {
- const params: Openlayer.ProjectCreateParams = { name: 'My Project', taskType: 'llm-base' };
- const projectCreateResponse: Openlayer.ProjectCreateResponse = await openlayer.projects.create(params);
+ const params: Openlayer.InferencePipelines.DataStreamParams = {
+ config: {
+ inputVariableNames: ['user_query'],
+ outputColumnName: 'output',
+ numOfTokenColumnName: 'tokens',
+ costColumnName: 'cost',
+ timestampColumnName: 'timestamp',
+ },
+ rows: [
+ {
+ user_query: "what's the meaning of life?",
+ output: '42',
+ tokens: 7,
+ cost: 0.02,
+ timestamp: 1620000000,
+ },
+ ],
+ };
+ const dataStreamResponse: Openlayer.InferencePipelines.DataStreamResponse =
+ await openlayer.inferencePipelines.data.stream('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', params);
}
main();
@@ -66,8 +104,25 @@ a subclass of `APIError` will be thrown:
```ts
async function main() {
- const projectCreateResponse = await openlayer.projects
- .create({ name: 'My Project', taskType: 'llm-base' })
+ const dataStreamResponse = await openlayer.inferencePipelines.data
+ .stream('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
+ config: {
+ inputVariableNames: ['user_query'],
+ outputColumnName: 'output',
+ numOfTokenColumnName: 'tokens',
+ costColumnName: 'cost',
+ timestampColumnName: 'timestamp',
+ },
+ rows: [
+ {
+ user_query: "what's the meaning of life?",
+ output: '42',
+ tokens: 7,
+ cost: 0.02,
+ timestamp: 1620000000,
+ },
+ ],
+ })
.catch(async (err) => {
if (err instanceof Openlayer.APIError) {
console.log(err.status); // 400
@@ -111,7 +166,7 @@ const openlayer = new Openlayer({
});
// Or, configure per-request:
-await openlayer.projects.create({ name: 'My Project', taskType: 'llm-base' }, {
+await openlayer.inferencePipelines.data.stream('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', { config: { inputVariableNames: ['user_query'], outputColumnName: 'output', numOfTokenColumnName: 'tokens', costColumnName: 'cost', timestampColumnName: 'timestamp' }, rows: [{ user_query: 'what\'s the meaning of life?', output: '42', tokens: 7, cost: 0.02, timestamp: 1620000000 }] }, {
maxRetries: 5,
});
```
@@ -128,7 +183,7 @@ const openlayer = new Openlayer({
});
// Override per-request:
-await openlayer.projects.create({ name: 'My Project', taskType: 'llm-base' }, {
+await openlayer.inferencePipelines.data.stream('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', { config: { inputVariableNames: ['user_query'], outputColumnName: 'output', numOfTokenColumnName: 'tokens', costColumnName: 'cost', timestampColumnName: 'timestamp' }, rows: [{ user_query: 'what\'s the meaning of life?', output: '42', tokens: 7, cost: 0.02, timestamp: 1620000000 }] }, {
timeout: 5 * 1000,
});
```
@@ -149,15 +204,51 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
```ts
const openlayer = new Openlayer();
-const response = await openlayer.projects.create({ name: 'My Project', taskType: 'llm-base' }).asResponse();
+const response = await openlayer.inferencePipelines.data
+ .stream('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
+ config: {
+ inputVariableNames: ['user_query'],
+ outputColumnName: 'output',
+ numOfTokenColumnName: 'tokens',
+ costColumnName: 'cost',
+ timestampColumnName: 'timestamp',
+ },
+ rows: [
+ {
+ user_query: "what's the meaning of life?",
+ output: '42',
+ tokens: 7,
+ cost: 0.02,
+ timestamp: 1620000000,
+ },
+ ],
+ })
+ .asResponse();
console.log(response.headers.get('X-My-Header'));
console.log(response.statusText); // access the underlying Response object
-const { data: projectCreateResponse, response: raw } = await openlayer.projects
- .create({ name: 'My Project', taskType: 'llm-base' })
+const { data: dataStreamResponse, response: raw } = await openlayer.inferencePipelines.data
+ .stream('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
+ config: {
+ inputVariableNames: ['user_query'],
+ outputColumnName: 'output',
+ numOfTokenColumnName: 'tokens',
+ costColumnName: 'cost',
+ timestampColumnName: 'timestamp',
+ },
+ rows: [
+ {
+ user_query: "what's the meaning of life?",
+ output: '42',
+ tokens: 7,
+ cost: 0.02,
+ timestamp: 1620000000,
+ },
+ ],
+ })
.withResponse();
console.log(raw.headers.get('X-My-Header'));
-console.log(projectCreateResponse.id);
+console.log(dataStreamResponse.success);
```
### Making custom/undocumented requests
@@ -261,8 +352,26 @@ const openlayer = new Openlayer({
});
// Override per-request:
-await openlayer.projects.create(
- { name: 'My Project', taskType: 'llm-base' },
+await openlayer.inferencePipelines.data.stream(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ {
+ config: {
+ inputVariableNames: ['user_query'],
+ outputColumnName: 'output',
+ numOfTokenColumnName: 'tokens',
+ costColumnName: 'cost',
+ timestampColumnName: 'timestamp',
+ },
+ rows: [
+ {
+ user_query: "what's the meaning of life?",
+ output: '42',
+ tokens: 7,
+ cost: 0.02,
+ timestamp: 1620000000,
+ },
+ ],
+ },
{
httpAgent: new http.Agent({ keepAlive: false }),
},
diff --git a/api.md b/api.md
index 3c6c1f4e..ec55a49d 100644
--- a/api.md
+++ b/api.md
@@ -12,14 +12,56 @@ Methods:
## Commits
+Types:
+
+- CommitListResponse
+
+Methods:
+
+- client.projects.commits.list(projectId, { ...params }) -> CommitListResponse
+
## InferencePipelines
+Types:
+
+- InferencePipelineCreateResponse
+- InferencePipelineListResponse
+
+Methods:
+
+- client.projects.inferencePipelines.create(projectId, { ...params }) -> InferencePipelineCreateResponse
+- client.projects.inferencePipelines.list(projectId, { ...params }) -> InferencePipelineListResponse
+
# Commits
## TestResults
+Types:
+
+- TestResultListResponse
+
+Methods:
+
+- client.commits.testResults.list(projectVersionId, { ...params }) -> TestResultListResponse
+
# InferencePipelines
## Data
+Types:
+
+- DataStreamResponse
+
+Methods:
+
+- client.inferencePipelines.data.stream(inferencePipelineId, { ...params }) -> DataStreamResponse
+
## TestResults
+
+Types:
+
+- TestResultListResponse
+
+Methods:
+
+- client.inferencePipelines.testResults.list(inferencePipelineId, { ...params }) -> TestResultListResponse
diff --git a/src/resources/commits/commits.ts b/src/resources/commits/commits.ts
index 0b115516..bc3cc40d 100644
--- a/src/resources/commits/commits.ts
+++ b/src/resources/commits/commits.ts
@@ -9,4 +9,6 @@ export class Commits extends APIResource {
export namespace Commits {
export import TestResults = TestResultsAPI.TestResults;
+ export import TestResultListResponse = TestResultsAPI.TestResultListResponse;
+ export import TestResultListParams = TestResultsAPI.TestResultListParams;
}
diff --git a/src/resources/commits/index.ts b/src/resources/commits/index.ts
index 37b0c9d3..9f35f3f4 100644
--- a/src/resources/commits/index.ts
+++ b/src/resources/commits/index.ts
@@ -1,4 +1,4 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export { Commits } from './commits';
-export { TestResults } from './test-results';
+export { TestResultListResponse, TestResultListParams, TestResults } from './test-results';
diff --git a/src/resources/commits/test-results.ts b/src/resources/commits/test-results.ts
index 1ea73b57..e776e3c5 100644
--- a/src/resources/commits/test-results.ts
+++ b/src/resources/commits/test-results.ts
@@ -1,5 +1,284 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import * as TestResultsAPI from './test-results';
-export class TestResults extends APIResource {}
+export class TestResults extends APIResource {
+ /**
+ * List the test results for a project commit (project version).
+ */
+ list(
+ projectVersionId: string,
+ query?: TestResultListParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<TestResultListResponse>;
+ list(projectVersionId: string, options?: Core.RequestOptions): Core.APIPromise<TestResultListResponse>;
+ list(
+ projectVersionId: string,
+ query: TestResultListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<TestResultListResponse> {
+ if (isRequestOptions(query)) {
+ return this.list(projectVersionId, {}, query);
+ }
+ return this._client.get(`/versions/${projectVersionId}/results`, { query, ...options });
+ }
+}
+
+export interface TestResultListResponse {
+ _meta: TestResultListResponse._Meta;
+
+ items: Array<TestResultListResponse.Item>;
+}
+
+export namespace TestResultListResponse {
+ export interface _Meta {
+ /**
+ * The current page.
+ */
+ page: number;
+
+ /**
+ * The number of items per page.
+ */
+ perPage: number;
+
+ /**
+ * The total number of items.
+ */
+ totalItems: number;
+
+ /**
+ * The total number of pages.
+ */
+ totalPages: number;
+ }
+
+ export interface Item {
+ /**
+ * Project version (commit) id.
+ */
+ id: string;
+
+ /**
+ * The creation date.
+ */
+ dateCreated: string;
+
+ /**
+ * The data end date.
+ */
+ dateDataEnds: string | null;
+
+ /**
+ * The data start date.
+ */
+ dateDataStarts: string | null;
+
+ /**
+ * The last updated date.
+ */
+ dateUpdated: string;
+
+ /**
+ * The inference pipeline id.
+ */
+ inferencePipelineId: string | null;
+
+ /**
+ * The project version (commit) id.
+ */
+ projectVersionId: string | null;
+
+ /**
+ * The status of the test.
+ */
+ status: 'running' | 'passing' | 'failing' | 'skipped' | 'error';
+
+ /**
+ * The status message.
+ */
+ statusMessage: string | null;
+
+ goal?: Item.Goal;
+
+ /**
+ * The test id.
+ */
+ goalId?: string | null;
+ }
+
+ export namespace Item {
+ export interface Goal {
+ /**
+ * The test id.
+ */
+ id: string;
+
+ /**
+ * The number of comments on the test.
+ */
+ commentCount: number;
+
+ /**
+ * The test creator id.
+ */
+ creatorId: string | null;
+
+ /**
+ * The date the test was archived.
+ */
+ dateArchived: string | null;
+
+ /**
+ * The creation date.
+ */
+ dateCreated: string;
+
+ /**
+ * The last updated date.
+ */
+ dateUpdated: string;
+
+ /**
+ * The test description.
+ */
+ description: unknown | null;
+
+ /**
+ * The test name.
+ */
+ name: string;
+
+ /**
+ * The test number.
+ */
+ number: number;
+
+ /**
+ * The project version (commit) id where the test was created.
+ */
+ originProjectVersionId: string | null;
+
+ /**
+ * The test subtype.
+ */
+ subtype: string;
+
+ /**
+ * Whether the test is suggested or user-created.
+ */
+ suggested: boolean;
+
+ thresholds: Array<Goal.Threshold>;
+
+ /**
+ * The test type.
+ */
+ type: string;
+
+ /**
+ * Whether the test is archived.
+ */
+ archived?: boolean;
+
+ /**
+ * The delay window in seconds. Only applies to tests that use production data.
+ */
+ delayWindow?: number | null;
+
+ /**
+ * The evaluation window in seconds. Only applies to tests that use production
+ * data.
+ */
+ evaluationWindow?: number | null;
+
+ /**
+ * Whether the test uses an ML model.
+ */
+ usesMlModel?: boolean;
+
+ /**
+ * Whether the test uses production data (monitoring mode only).
+ */
+ usesProductionData?: boolean;
+
+ /**
+ * Whether the test uses a reference dataset (monitoring mode only).
+ */
+ usesReferenceDataset?: boolean;
+
+ /**
+ * Whether the test uses a training dataset.
+ */
+ usesTrainingDataset?: boolean;
+
+ /**
+ * Whether the test uses a validation dataset.
+ */
+ usesValidationDataset?: boolean;
+ }
+
+ export namespace Goal {
+ export interface Threshold {
+ /**
+ * The insight name to be evaluated.
+ */
+ insightName?: string;
+
+ insightParameters?: Array<unknown>;
+
+ /**
+ * The measurement to be evaluated.
+ */
+ measurement?: string;
+
+ /**
+ * The operator to be used for the evaluation.
+ */
+ operator?: string;
+
+ /**
+ * The value to be compared.
+ */
+ value?: number | boolean | string | Array<string>;
+ }
+ }
+ }
+}
+
+export interface TestResultListParams {
+ /**
+ * Include archived goals.
+ */
+ includeArchived?: boolean;
+
+ /**
+ * The page to return in a paginated query.
+ */
+ page?: number;
+
+ /**
+ * Maximum number of items to return per page.
+ */
+ perPage?: number;
+
+ /**
+ * Filter list of test results by status. Available statuses are `running`,
+ * `passing`, `failing`, `skipped`, and `error`.
+ */
+ status?: 'running' | 'passing' | 'failing' | 'skipped' | 'error';
+
+ /**
+ * Filter objects by test type. Available types are `integrity`, `consistency`,
+ * `performance`, `fairness`, and `robustness`.
+ */
+ type?: 'integrity' | 'consistency' | 'performance' | 'fairness' | 'robustness';
+}
+
+export namespace TestResults {
+ export import TestResultListResponse = TestResultsAPI.TestResultListResponse;
+ export import TestResultListParams = TestResultsAPI.TestResultListParams;
+}
diff --git a/src/resources/inference-pipelines/data.ts b/src/resources/inference-pipelines/data.ts
index fc9ec738..41e52dbd 100644
--- a/src/resources/inference-pipelines/data.ts
+++ b/src/resources/inference-pipelines/data.ts
@@ -1,5 +1,285 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from '../../resource';
+import * as Core from '../../core';
+import * as DataAPI from './data';
-export class Data extends APIResource {}
+export class Data extends APIResource {
+ /**
+ * Stream production data to an inference pipeline.
+ */
+ stream(
+ inferencePipelineId: string,
+ body: DataStreamParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<DataStreamResponse> {
+ return this._client.post(`/inference-pipelines/${inferencePipelineId}/data-stream`, { body, ...options });
+ }
+}
+
+export interface DataStreamResponse {
+ success: true;
+}
+
+export interface DataStreamParams {
+ /**
+ * Configuration for the data stream. Depends on your **Openlayer project task
+ * type**.
+ */
+ config:
+ | DataStreamParams.LlmData
+ | DataStreamParams.TabularClassificationData
+ | DataStreamParams.TabularRegressionData
+ | DataStreamParams.TextClassificationData;
+
+ /**
+ * A list of entries that represent rows of a csv file
+ */
+ rows: Array<Record<string, unknown>>;
+}
+
+export namespace DataStreamParams {
+ export interface LlmData {
+ /**
+ * Name of the column with the model outputs.
+ */
+ outputColumnName: string;
+
+ /**
+ * Name of the column with the context retrieved. Applies to RAG use cases.
+ * Providing the context enables RAG-specific metrics.
+ */
+ contextColumnName?: string;
+
+ /**
+ * Name of the column with the cost associated with each row.
+ */
+ costColumnName?: string;
+
+ /**
+ * Name of the column with the ground truths.
+ */
+ groundTruthColumnName?: string;
+
+ /**
+ * Name of the column with the inference ids. This is useful if you want to update
+ * rows at a later point in time. If not provided, a unique id is generated by
+ * Openlayer.
+ */
+ inferenceIdColumnName?: string;
+
+ /**
+ * Array of input variable names. Each input variable should be a dataset column.
+ */
+ inputVariableNames?: Array<string>;
+
+ /**
+ * Name of the column with the latencies.
+ */
+ latencyColumnName?: string;
+
+ /**
+ * Object with metadata.
+ */
+ metadata?: unknown;
+
+ /**
+ * Name of the column with the total number of tokens.
+ */
+ numOfTokenColumnName?: string | null;
+
+ /**
+ * Prompt for the LLM.
+ */
+ prompt?: Array<LlmData.Prompt>;
+
+ /**
+ * Name of the column with the questions. Applies to RAG use cases. Providing the
+ * question enables RAG-specific metrics.
+ */
+ questionColumnName?: string;
+
+ /**
+ * Name of the column with the timestamps. Timestamps must be in UNIX sec format.
+ * If not provided, the upload timestamp is used.
+ */
+ timestampColumnName?: string;
+ }
+
+ export namespace LlmData {
+ export interface Prompt {
+ /**
+ * Content of the prompt.
+ */
+ content?: string;
+
+ /**
+ * Role of the prompt.
+ */
+ role?: string;
+ }
+ }
+
+ export interface TabularClassificationData {
+ /**
+ * List of class names indexed by label integer in the dataset. E.g. ["Retained",
+ * "Exited"] when 0, 1 are in your label column.
+ */
+ classNames: Array<string>;
+
+ /**
+ * Array with the names of all categorical features in the dataset. E.g. ["Age",
+ * "Geography"].
+ */
+ categoricalFeatureNames?: Array<string>;
+
+ /**
+ * Array with all input feature names.
+ */
+ featureNames?: Array<string>;
+
+ /**
+ * Name of the column with the inference ids. This is useful if you want to update
+ * rows at a later point in time. If not provided, a unique id is generated by
+ * Openlayer.
+ */
+ inferenceIdColumnName?: string;
+
+ /**
+ * Name of the column with the labels. The data in this column must be
+ * **zero-indexed integers**, matching the list provided in `classNames`.
+ */
+ labelColumnName?: string;
+
+ /**
+ * Name of the column with the latencies.
+ */
+ latencyColumnName?: string;
+
+ /**
+ * Object with metadata.
+ */
+ metadata?: unknown;
+
+ /**
+ * Name of the column with the model's predictions as **zero-indexed integers**.
+ */
+ predictionsColumnName?: string;
+
+ /**
+ * Name of the column with the model's predictions as **lists of class
+ * probabilities**.
+ */
+ predictionScoresColumnName?: string;
+
+ /**
+ * Name of the column with the timestamps. Timestamps must be in UNIX sec format.
+ * If not provided, the upload timestamp is used.
+ */
+ timestampColumnName?: string;
+ }
+
+ export interface TabularRegressionData {
+ /**
+ * Array with the names of all categorical features in the dataset. E.g. ["Gender",
+ * "Geography"].
+ */
+ categoricalFeatureNames?: Array<string>;
+
+ /**
+ * Array with all input feature names.
+ */
+ featureNames?: Array<string>;
+
+ /**
+ * Name of the column with the inference ids. This is useful if you want to update
+ * rows at a later point in time. If not provided, a unique id is generated by
+ * Openlayer.
+ */
+ inferenceIdColumnName?: string;
+
+ /**
+ * Name of the column with the latencies.
+ */
+ latencyColumnName?: string;
+
+ /**
+ * Object with metadata.
+ */
+ metadata?: unknown;
+
+ /**
+ * Name of the column with the model's predictions.
+ */
+ predictionsColumnName?: string;
+
+ /**
+ * Name of the column with the targets (ground truth values).
+ */
+ targetColumnName?: string;
+
+ /**
+ * Name of the column with the timestamps. Timestamps must be in UNIX sec format.
+ * If not provided, the upload timestamp is used.
+ */
+ timestampColumnName?: string;
+ }
+
+ export interface TextClassificationData {
+ /**
+ * List of class names indexed by label integer in the dataset. E.g. ["Retained",
+ * "Exited"] when 0, 1 are in your label column.
+ */
+ classNames: Array<string>;
+
+ /**
+ * Name of the column with the inference ids. This is useful if you want to update
+ * rows at a later point in time. If not provided, a unique id is generated by
+ * Openlayer.
+ */
+ inferenceIdColumnName?: string;
+
+ /**
+ * Name of the column with the labels. The data in this column must be
+ * **zero-indexed integers**, matching the list provided in `classNames`.
+ */
+ labelColumnName?: string;
+
+ /**
+ * Name of the column with the latencies.
+ */
+ latencyColumnName?: string;
+
+ /**
+ * Object with metadata.
+ */
+ metadata?: unknown;
+
+ /**
+ * Name of the column with the model's predictions as **zero-indexed integers**.
+ */
+ predictionsColumnName?: string;
+
+ /**
+ * Name of the column with the model's predictions as **lists of class
+ * probabilities**.
+ */
+ predictionScoresColumnName?: string;
+
+ /**
+ * Name of the column with the text data.
+ */
+ textColumnName?: string;
+
+ /**
+ * Name of the column with the timestamps. Timestamps must be in UNIX sec format.
+ * If not provided, the upload timestamp is used.
+ */
+ timestampColumnName?: string;
+ }
+}
+
+export namespace Data {
+ export import DataStreamResponse = DataAPI.DataStreamResponse;
+ export import DataStreamParams = DataAPI.DataStreamParams;
+}
diff --git a/src/resources/inference-pipelines/index.ts b/src/resources/inference-pipelines/index.ts
index f40f01bc..d8a6a0b2 100644
--- a/src/resources/inference-pipelines/index.ts
+++ b/src/resources/inference-pipelines/index.ts
@@ -1,5 +1,5 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-export { Data } from './data';
+export { DataStreamResponse, DataStreamParams, Data } from './data';
export { InferencePipelines } from './inference-pipelines';
-export { TestResults } from './test-results';
+export { TestResultListResponse, TestResultListParams, TestResults } from './test-results';
diff --git a/src/resources/inference-pipelines/inference-pipelines.ts b/src/resources/inference-pipelines/inference-pipelines.ts
index b7fac051..99515d82 100644
--- a/src/resources/inference-pipelines/inference-pipelines.ts
+++ b/src/resources/inference-pipelines/inference-pipelines.ts
@@ -11,5 +11,9 @@ export class InferencePipelines extends APIResource {
export namespace InferencePipelines {
export import Data = DataAPI.Data;
+ export import DataStreamResponse = DataAPI.DataStreamResponse;
+ export import DataStreamParams = DataAPI.DataStreamParams;
export import TestResults = TestResultsAPI.TestResults;
+ export import TestResultListResponse = TestResultsAPI.TestResultListResponse;
+ export import TestResultListParams = TestResultsAPI.TestResultListParams;
}
diff --git a/src/resources/inference-pipelines/test-results.ts b/src/resources/inference-pipelines/test-results.ts
index 1ea73b57..083fe4f2 100644
--- a/src/resources/inference-pipelines/test-results.ts
+++ b/src/resources/inference-pipelines/test-results.ts
@@ -1,5 +1,279 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import * as TestResultsAPI from './test-results';
-export class TestResults extends APIResource {}
+export class TestResults extends APIResource {
+ /**
+ * List the latest test results for an inference pipeline.
+ */
+ list(
+ inferencePipelineId: string,
+ query?: TestResultListParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<TestResultListResponse>;
+ list(inferencePipelineId: string, options?: Core.RequestOptions): Core.APIPromise<TestResultListResponse>;
+ list(
+ inferencePipelineId: string,
+ query: TestResultListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<TestResultListResponse> {
+ if (isRequestOptions(query)) {
+ return this.list(inferencePipelineId, {}, query);
+ }
+ return this._client.get(`/inference-pipelines/${inferencePipelineId}/results`, { query, ...options });
+ }
+}
+
+export interface TestResultListResponse {
+ _meta: TestResultListResponse._Meta;
+
+ items: Array<TestResultListResponse.Item>;
+}
+
+export namespace TestResultListResponse {
+ export interface _Meta {
+ /**
+ * The current page.
+ */
+ page: number;
+
+ /**
+ * The number of items per page.
+ */
+ perPage: number;
+
+ /**
+ * The total number of items.
+ */
+ totalItems: number;
+
+ /**
+ * The total number of pages.
+ */
+ totalPages: number;
+ }
+
+ export interface Item {
+ /**
+ * Project version (commit) id.
+ */
+ id: string;
+
+ /**
+ * The creation date.
+ */
+ dateCreated: string;
+
+ /**
+ * The data end date.
+ */
+ dateDataEnds: string | null;
+
+ /**
+ * The data start date.
+ */
+ dateDataStarts: string | null;
+
+ /**
+ * The last updated date.
+ */
+ dateUpdated: string;
+
+ /**
+ * The inference pipeline id.
+ */
+ inferencePipelineId: string | null;
+
+ /**
+ * The project version (commit) id.
+ */
+ projectVersionId: string | null;
+
+ /**
+ * The status of the test.
+ */
+ status: 'running' | 'passing' | 'failing' | 'skipped' | 'error';
+
+ /**
+ * The status message.
+ */
+ statusMessage: string | null;
+
+ goal?: Item.Goal;
+
+ /**
+ * The test id.
+ */
+ goalId?: string | null;
+ }
+
+ export namespace Item {
+ export interface Goal {
+ /**
+ * The test id.
+ */
+ id: string;
+
+ /**
+ * The number of comments on the test.
+ */
+ commentCount: number;
+
+ /**
+ * The test creator id.
+ */
+ creatorId: string | null;
+
+ /**
+ * The date the test was archived.
+ */
+ dateArchived: string | null;
+
+ /**
+ * The creation date.
+ */
+ dateCreated: string;
+
+ /**
+ * The last updated date.
+ */
+ dateUpdated: string;
+
+ /**
+ * The test description.
+ */
+ description: unknown | null;
+
+ /**
+ * The test name.
+ */
+ name: string;
+
+ /**
+ * The test number.
+ */
+ number: number;
+
+ /**
+ * The project version (commit) id where the test was created.
+ */
+ originProjectVersionId: string | null;
+
+ /**
+ * The test subtype.
+ */
+ subtype: string;
+
+ /**
+ * Whether the test is suggested or user-created.
+ */
+ suggested: boolean;
+
+ thresholds: Array<Goal.Threshold>;
+
+ /**
+ * The test type.
+ */
+ type: string;
+
+ /**
+ * Whether the test is archived.
+ */
+ archived?: boolean;
+
+ /**
+ * The delay window in seconds. Only applies to tests that use production data.
+ */
+ delayWindow?: number | null;
+
+ /**
+ * The evaluation window in seconds. Only applies to tests that use production
+ * data.
+ */
+ evaluationWindow?: number | null;
+
+ /**
+ * Whether the test uses an ML model.
+ */
+ usesMlModel?: boolean;
+
+ /**
+ * Whether the test uses production data (monitoring mode only).
+ */
+ usesProductionData?: boolean;
+
+ /**
+ * Whether the test uses a reference dataset (monitoring mode only).
+ */
+ usesReferenceDataset?: boolean;
+
+ /**
+ * Whether the test uses a training dataset.
+ */
+ usesTrainingDataset?: boolean;
+
+ /**
+ * Whether the test uses a validation dataset.
+ */
+ usesValidationDataset?: boolean;
+ }
+
+ export namespace Goal {
+ export interface Threshold {
+ /**
+ * The insight name to be evaluated.
+ */
+ insightName?: string;
+
+ insightParameters?: Array<unknown>;
+
+ /**
+ * The measurement to be evaluated.
+ */
+ measurement?: string;
+
+ /**
+ * The operator to be used for the evaluation.
+ */
+ operator?: string;
+
+ /**
+ * The value to be compared.
+ */
+ value?: number | boolean | string | Array<string>;
+ }
+ }
+ }
+}
+
+export interface TestResultListParams {
+ /**
+ * The page to return in a paginated query.
+ */
+ page?: number;
+
+ /**
+ * Maximum number of items to return per page.
+ */
+ perPage?: number;
+
+ /**
+ * Filter list of test results by status. Available statuses are `running`,
+ * `passing`, `failing`, `skipped`, and `error`.
+ */
+ status?: 'running' | 'passing' | 'failing' | 'skipped' | 'error';
+
+ /**
+ * Filter objects by test type. Available types are `integrity`, `consistency`,
+ * `performance`, `fairness`, and `robustness`.
+ */
+ type?: 'integrity' | 'consistency' | 'performance' | 'fairness' | 'robustness';
+}
+
+export namespace TestResults {
+ export import TestResultListResponse = TestResultsAPI.TestResultListResponse;
+ export import TestResultListParams = TestResultsAPI.TestResultListParams;
+}
diff --git a/src/resources/projects/commits.ts b/src/resources/projects/commits.ts
index 06f99c84..e731e047 100644
--- a/src/resources/projects/commits.ts
+++ b/src/resources/projects/commits.ts
@@ -1,5 +1,226 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import * as CommitsAPI from './commits';
-export class Commits extends APIResource {}
+export class Commits extends APIResource {
+ /**
+ * List the commits (project versions) in a project.
+ */
+ list(
+ projectId: string,
+ query?: CommitListParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<CommitListResponse>;
+ list(projectId: string, options?: Core.RequestOptions): Core.APIPromise<CommitListResponse>;
+ list(
+ projectId: string,
+ query: CommitListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<CommitListResponse> {
+ if (isRequestOptions(query)) {
+ return this.list(projectId, {}, query);
+ }
+ return this._client.get(`/projects/${projectId}/versions`, { query, ...options });
+ }
+}
+
+export interface CommitListResponse {
+ _meta: CommitListResponse._Meta;
+
+ items: Array<CommitListResponse.Item>;
+}
+
+export namespace CommitListResponse {
+ export interface _Meta {
+ /**
+ * The current page.
+ */
+ page: number;
+
+ /**
+ * The number of items per page.
+ */
+ perPage: number;
+
+ /**
+ * The total number of items.
+ */
+ totalItems: number;
+
+ /**
+ * The total number of pages.
+ */
+ totalPages: number;
+ }
+
+ export interface Item {
+ /**
+ * The project version (commit) id.
+ */
+ id: string;
+
+ /**
+ * The details of a commit (project version).
+ */
+ commit: Item.Commit;
+
+ /**
+ * The commit archive date.
+ */
+ dateArchived: string | null;
+
+ /**
+ * The project version (commit) creation date.
+ */
+ dateCreated: string;
+
+ /**
+ * The number of tests that are failing for the commit.
+ */
+ failingGoalCount: number;
+
+ /**
+ * The model id.
+ */
+ mlModelId: string | null;
+
+ /**
+ * The number of tests that are passing for the commit.
+ */
+ passingGoalCount: number;
+
+ /**
+ * The project id.
+ */
+ projectId: string;
+
+ /**
+ * The commit status. Initially, the commit is `queued`, then, it switches to
+ * `running`. Finally, it can be `paused`, `failed`, or `completed`.
+ */
+ status: 'queued' | 'running' | 'paused' | 'failed' | 'completed' | 'unknown';
+
+ /**
+ * The commit status message.
+ */
+ statusMessage: string | null;
+
+ /**
+ * The total number of tests for the commit.
+ */
+ totalGoalCount: number;
+
+ /**
+ * The training dataset id.
+ */
+ trainingDatasetId: string | null;
+
+ /**
+ * The validation dataset id.
+ */
+ validationDatasetId: string | null;
+
+ /**
+ * Whether the commit is archived.
+ */
+ archived?: boolean | null;
+
+ /**
+ * The deployment status associated with the commit's model.
+ */
+ deploymentStatus?: string;
+
+ links?: Item.Links;
+ }
+
+ export namespace Item {
+ /**
+ * The details of a commit (project version).
+ */
+ export interface Commit {
+ /**
+ * The commit id.
+ */
+ id: string;
+
+ /**
+ * The author id of the commit.
+ */
+ authorId: string;
+
+ /**
+ * The size of the commit bundle in bytes.
+ */
+ fileSize: number | null;
+
+ /**
+ * The commit message.
+ */
+ message: string;
+
+ /**
+ * The model id.
+ */
+ mlModelId: string | null;
+
+ /**
+ * The storage URI where the commit bundle is stored.
+ */
+ storageUri: string;
+
+ /**
+ * The training dataset id.
+ */
+ trainingDatasetId: string | null;
+
+ /**
+ * The validation dataset id.
+ */
+ validationDatasetId: string | null;
+
+ /**
+ * The commit creation date.
+ */
+ dateCreated?: string;
+
+ /**
+ * The ref of the corresponding git commit.
+ */
+ gitCommitRef?: string;
+
+ /**
+ * The SHA of the corresponding git commit.
+ */
+ gitCommitSha?: number;
+
+ /**
+ * The URL of the corresponding git commit.
+ */
+ gitCommitUrl?: string;
+ }
+
+ export interface Links {
+ app: string;
+ }
+ }
+}
+
+export interface CommitListParams {
+ /**
+ * The page to return in a paginated query.
+ */
+ page?: number;
+
+ /**
+ * Maximum number of items to return per page.
+ */
+ perPage?: number;
+}
+
+export namespace Commits {
+ export import CommitListResponse = CommitsAPI.CommitListResponse;
+ export import CommitListParams = CommitsAPI.CommitListParams;
+}
diff --git a/src/resources/projects/index.ts b/src/resources/projects/index.ts
index 8107c3ac..62a84c5a 100644
--- a/src/resources/projects/index.ts
+++ b/src/resources/projects/index.ts
@@ -1,7 +1,13 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-export { Commits } from './commits';
-export { InferencePipelines } from './inference-pipelines';
+export { CommitListResponse, CommitListParams, Commits } from './commits';
+export {
+ InferencePipelineCreateResponse,
+ InferencePipelineListResponse,
+ InferencePipelineCreateParams,
+ InferencePipelineListParams,
+ InferencePipelines,
+} from './inference-pipelines';
export {
ProjectCreateResponse,
ProjectListResponse,
diff --git a/src/resources/projects/inference-pipelines.ts b/src/resources/projects/inference-pipelines.ts
index 31b150cd..3640b427 100644
--- a/src/resources/projects/inference-pipelines.ts
+++ b/src/resources/projects/inference-pipelines.ts
@@ -1,5 +1,265 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import * as InferencePipelinesAPI from './inference-pipelines';
-export class InferencePipelines extends APIResource {}
+export class InferencePipelines extends APIResource {
+ /**
+ * Create an inference pipeline in a project.
+ */
+ create(
+ projectId: string,
+ body: InferencePipelineCreateParams,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<InferencePipelineCreateResponse> {
+ return this._client.post(`/projects/${projectId}/inference-pipelines`, { body, ...options });
+ }
+
+ /**
+ * List the inference pipelines in a project.
+ */
+ list(
+ projectId: string,
+ query?: InferencePipelineListParams,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<InferencePipelineListResponse>;
+  list(projectId: string, options?: Core.RequestOptions): Core.APIPromise<InferencePipelineListResponse>;
+ list(
+ projectId: string,
+ query: InferencePipelineListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<InferencePipelineListResponse> {
+ if (isRequestOptions(query)) {
+ return this.list(projectId, {}, query);
+ }
+ return this._client.get(`/projects/${projectId}/inference-pipelines`, { query, ...options });
+ }
+}
+
+export interface InferencePipelineCreateResponse {
+ /**
+ * The inference pipeline id.
+ */
+ id: string;
+
+ /**
+ * The creation date.
+ */
+ dateCreated: string;
+
+ /**
+ * The last test evaluation date.
+ */
+ dateLastEvaluated: string | null;
+
+ /**
+ * The last data sample received date.
+ */
+ dateLastSampleReceived: string | null;
+
+ /**
+ * The next test evaluation date.
+ */
+ dateOfNextEvaluation: string | null;
+
+ /**
+ * The last updated date.
+ */
+ dateUpdated: string;
+
+ /**
+ * The inference pipeline description.
+ */
+ description: string | null;
+
+ /**
+ * The number of tests failing.
+ */
+ failingGoalCount: number;
+
+ links: InferencePipelineCreateResponse.Links;
+
+ /**
+ * The inference pipeline name.
+ */
+ name: string;
+
+ /**
+ * The number of tests passing.
+ */
+ passingGoalCount: number;
+
+ /**
+ * The project id.
+ */
+ projectId: string;
+
+ /**
+ * The status of test evaluation for the inference pipeline.
+ */
+ status: 'queued' | 'running' | 'paused' | 'failed' | 'completed' | 'unknown';
+
+ /**
+ * The status message of test evaluation for the inference pipeline.
+ */
+ statusMessage: string | null;
+
+ /**
+ * The total number of tests.
+ */
+ totalGoalCount: number;
+}
+
+export namespace InferencePipelineCreateResponse {
+ export interface Links {
+ app: string;
+ }
+}
+
+export interface InferencePipelineListResponse {
+ _meta: InferencePipelineListResponse._Meta;
+
+  items: Array<InferencePipelineListResponse.Item>;
+}
+
+export namespace InferencePipelineListResponse {
+ export interface _Meta {
+ /**
+ * The current page.
+ */
+ page: number;
+
+ /**
+ * The number of items per page.
+ */
+ perPage: number;
+
+ /**
+ * The total number of items.
+ */
+ totalItems: number;
+
+ /**
+ * The total number of pages.
+ */
+ totalPages: number;
+ }
+
+ export interface Item {
+ /**
+ * The inference pipeline id.
+ */
+ id: string;
+
+ /**
+ * The creation date.
+ */
+ dateCreated: string;
+
+ /**
+ * The last test evaluation date.
+ */
+ dateLastEvaluated: string | null;
+
+ /**
+ * The last data sample received date.
+ */
+ dateLastSampleReceived: string | null;
+
+ /**
+ * The next test evaluation date.
+ */
+ dateOfNextEvaluation: string | null;
+
+ /**
+ * The last updated date.
+ */
+ dateUpdated: string;
+
+ /**
+ * The inference pipeline description.
+ */
+ description: string | null;
+
+ /**
+ * The number of tests failing.
+ */
+ failingGoalCount: number;
+
+ links: Item.Links;
+
+ /**
+ * The inference pipeline name.
+ */
+ name: string;
+
+ /**
+ * The number of tests passing.
+ */
+ passingGoalCount: number;
+
+ /**
+ * The project id.
+ */
+ projectId: string;
+
+ /**
+ * The status of test evaluation for the inference pipeline.
+ */
+ status: 'queued' | 'running' | 'paused' | 'failed' | 'completed' | 'unknown';
+
+ /**
+ * The status message of test evaluation for the inference pipeline.
+ */
+ statusMessage: string | null;
+
+ /**
+ * The total number of tests.
+ */
+ totalGoalCount: number;
+ }
+
+ export namespace Item {
+ export interface Links {
+ app: string;
+ }
+ }
+}
+
+export interface InferencePipelineCreateParams {
+ /**
+ * The inference pipeline description.
+ */
+ description: string | null;
+
+ /**
+ * The inference pipeline name.
+ */
+ name: string;
+}
+
+export interface InferencePipelineListParams {
+ /**
+ * Filter list of items by name.
+ */
+ name?: string;
+
+ /**
+ * The page to return in a paginated query.
+ */
+ page?: number;
+
+ /**
+ * Maximum number of items to return per page.
+ */
+ perPage?: number;
+}
+
+export namespace InferencePipelines {
+ export import InferencePipelineCreateResponse = InferencePipelinesAPI.InferencePipelineCreateResponse;
+ export import InferencePipelineListResponse = InferencePipelinesAPI.InferencePipelineListResponse;
+ export import InferencePipelineCreateParams = InferencePipelinesAPI.InferencePipelineCreateParams;
+ export import InferencePipelineListParams = InferencePipelinesAPI.InferencePipelineListParams;
+}
diff --git a/src/resources/projects/projects.ts b/src/resources/projects/projects.ts
index 9af4e9d0..c2f7c1dd 100644
--- a/src/resources/projects/projects.ts
+++ b/src/resources/projects/projects.ts
@@ -353,5 +353,11 @@ export namespace Projects {
export import ProjectCreateParams = ProjectsAPI.ProjectCreateParams;
export import ProjectListParams = ProjectsAPI.ProjectListParams;
export import Commits = CommitsAPI.Commits;
+ export import CommitListResponse = CommitsAPI.CommitListResponse;
+ export import CommitListParams = CommitsAPI.CommitListParams;
export import InferencePipelines = InferencePipelinesAPI.InferencePipelines;
+ export import InferencePipelineCreateResponse = InferencePipelinesAPI.InferencePipelineCreateResponse;
+ export import InferencePipelineListResponse = InferencePipelinesAPI.InferencePipelineListResponse;
+ export import InferencePipelineCreateParams = InferencePipelinesAPI.InferencePipelineCreateParams;
+ export import InferencePipelineListParams = InferencePipelinesAPI.InferencePipelineListParams;
}
diff --git a/tests/api-resources/commits/test-results.test.ts b/tests/api-resources/commits/test-results.test.ts
new file mode 100644
index 00000000..626ed97e
--- /dev/null
+++ b/tests/api-resources/commits/test-results.test.ts
@@ -0,0 +1,42 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Openlayer from 'openlayer';
+import { Response } from 'node-fetch';
+
+const openlayer = new Openlayer({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource testResults', () => {
+ test('list', async () => {
+ const responsePromise = openlayer.commits.testResults.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openlayer.commits.testResults.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
+ path: '/_stainless_unknown_path',
+ }),
+ ).rejects.toThrow(Openlayer.NotFoundError);
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openlayer.commits.testResults.list(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ { includeArchived: true, page: 1, perPage: 1, status: 'passing', type: 'integrity' },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(Openlayer.NotFoundError);
+ });
+});
diff --git a/tests/api-resources/inference-pipelines/data.test.ts b/tests/api-resources/inference-pipelines/data.test.ts
new file mode 100644
index 00000000..d84517e7
--- /dev/null
+++ b/tests/api-resources/inference-pipelines/data.test.ts
@@ -0,0 +1,45 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Openlayer from 'openlayer';
+import { Response } from 'node-fetch';
+
+const openlayer = new Openlayer({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource data', () => {
+ test('stream: only required params', async () => {
+ const responsePromise = openlayer.inferencePipelines.data.stream('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
+ config: { outputColumnName: 'output' },
+ rows: [{ user_query: 'bar', output: 'bar', tokens: 'bar', cost: 'bar', timestamp: 'bar' }],
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('stream: required and optional params', async () => {
+ const response = await openlayer.inferencePipelines.data.stream('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
+ config: {
+ numOfTokenColumnName: 'tokens',
+ contextColumnName: 'context',
+ costColumnName: 'cost',
+ groundTruthColumnName: 'ground_truth',
+ inferenceIdColumnName: 'id',
+ inputVariableNames: ['user_query'],
+ latencyColumnName: 'latency',
+ metadata: {},
+ outputColumnName: 'output',
+ prompt: [{ role: 'user', content: '{{ user_query }}' }],
+ questionColumnName: 'question',
+ timestampColumnName: 'timestamp',
+ },
+ rows: [{ user_query: 'bar', output: 'bar', tokens: 'bar', cost: 'bar', timestamp: 'bar' }],
+ });
+ });
+});
diff --git a/tests/api-resources/inference-pipelines/test-results.test.ts b/tests/api-resources/inference-pipelines/test-results.test.ts
new file mode 100644
index 00000000..ac3f4427
--- /dev/null
+++ b/tests/api-resources/inference-pipelines/test-results.test.ts
@@ -0,0 +1,44 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Openlayer from 'openlayer';
+import { Response } from 'node-fetch';
+
+const openlayer = new Openlayer({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource testResults', () => {
+ test('list', async () => {
+ const responsePromise = openlayer.inferencePipelines.testResults.list(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ );
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openlayer.inferencePipelines.testResults.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
+ path: '/_stainless_unknown_path',
+ }),
+ ).rejects.toThrow(Openlayer.NotFoundError);
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openlayer.inferencePipelines.testResults.list(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ { page: 1, perPage: 1, status: 'passing', type: 'integrity' },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(Openlayer.NotFoundError);
+ });
+});
diff --git a/tests/api-resources/projects/commits.test.ts b/tests/api-resources/projects/commits.test.ts
new file mode 100644
index 00000000..1e6149f0
--- /dev/null
+++ b/tests/api-resources/projects/commits.test.ts
@@ -0,0 +1,42 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Openlayer from 'openlayer';
+import { Response } from 'node-fetch';
+
+const openlayer = new Openlayer({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource commits', () => {
+ test('list', async () => {
+ const responsePromise = openlayer.projects.commits.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openlayer.projects.commits.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
+ path: '/_stainless_unknown_path',
+ }),
+ ).rejects.toThrow(Openlayer.NotFoundError);
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openlayer.projects.commits.list(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ { page: 1, perPage: 1 },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(Openlayer.NotFoundError);
+ });
+});
diff --git a/tests/api-resources/projects/inference-pipelines.test.ts b/tests/api-resources/projects/inference-pipelines.test.ts
new file mode 100644
index 00000000..6b8f0bf0
--- /dev/null
+++ b/tests/api-resources/projects/inference-pipelines.test.ts
@@ -0,0 +1,65 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Openlayer from 'openlayer';
+import { Response } from 'node-fetch';
+
+const openlayer = new Openlayer({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource inferencePipelines', () => {
+ test('create: only required params', async () => {
+ const responsePromise = openlayer.projects.inferencePipelines.create(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ { description: 'This pipeline is used for production.', name: 'production' },
+ );
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await openlayer.projects.inferencePipelines.create(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ { description: 'This pipeline is used for production.', name: 'production' },
+ );
+ });
+
+ test('list', async () => {
+ const responsePromise = openlayer.projects.inferencePipelines.list(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ );
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openlayer.projects.inferencePipelines.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
+ path: '/_stainless_unknown_path',
+ }),
+ ).rejects.toThrow(Openlayer.NotFoundError);
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openlayer.projects.inferencePipelines.list(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ { name: 'string', page: 1, perPage: 1 },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(Openlayer.NotFoundError);
+ });
+});