diff --git a/.stats.yml b/.stats.yml
index f83abfd..39a1051 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
-configured_endpoints: 33
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-4ed32c3243ce7a772e55bb1ba204736fc3fb1d712d8ca0eb91bac0c7ac626938.yml
+configured_endpoints: 35
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-c0b9cfd71efb976777313fb342d2e31ebeb44b1b3f9bb7ddea971e6b2bc5fd19.yml
diff --git a/api.md b/api.md
index b4ce066..1721b50 100644
--- a/api.md
+++ b/api.md
@@ -156,3 +156,23 @@ Types:
Methods:
- client.lmUnit.create({ ...params }) -> LMUnitCreateResponse
+
+# Rerank
+
+Types:
+
+- RerankCreateResponse
+
+Methods:
+
+- client.rerank.create({ ...params }) -> RerankCreateResponse
+
+# Generate
+
+Types:
+
+- GenerateCreateResponse
+
+Methods:
+
+- client.generate.create({ ...params }) -> GenerateCreateResponse
diff --git a/src/index.ts b/src/index.ts
index 242d2aa..12c6113 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -15,7 +15,9 @@ import {
} from './pagination';
import * as Uploads from './uploads';
import * as API from './resources/index';
+import { Generate, GenerateCreateParams, GenerateCreateResponse } from './resources/generate';
import { LMUnit, LMUnitCreateParams, LMUnitCreateResponse } from './resources/lmunit';
+import { Rerank, RerankCreateParams, RerankCreateResponse } from './resources/rerank';
import {
Agent as AgentsAPIAgent,
AgentCreateParams,
@@ -157,6 +159,8 @@ export class ContextualAI extends Core.APIClient {
datastores: API.Datastores = new API.Datastores(this);
agents: API.Agents = new API.Agents(this);
lmUnit: API.LMUnit = new API.LMUnit(this);
+ rerank: API.Rerank = new API.Rerank(this);
+ generate: API.Generate = new API.Generate(this);
protected override defaultQuery(): Core.DefaultQuery | undefined {
return this._options.defaultQuery;
@@ -203,6 +207,8 @@ ContextualAI.DatastoresDatastoresPage = DatastoresDatastoresPage;
ContextualAI.Agents = Agents;
ContextualAI.AgentsPage = AgentsPage;
ContextualAI.LMUnit = LMUnit;
+ContextualAI.Rerank = Rerank;
+ContextualAI.Generate = Generate;
export declare namespace ContextualAI {
export type RequestOptions = Core.RequestOptions;
@@ -252,6 +258,18 @@ export declare namespace ContextualAI {
type LMUnitCreateResponse as LMUnitCreateResponse,
type LMUnitCreateParams as LMUnitCreateParams,
};
+
+ export {
+ Rerank as Rerank,
+ type RerankCreateResponse as RerankCreateResponse,
+ type RerankCreateParams as RerankCreateParams,
+ };
+
+ export {
+ Generate as Generate,
+ type GenerateCreateResponse as GenerateCreateResponse,
+ type GenerateCreateParams as GenerateCreateParams,
+ };
}
export { toFile, fileFromPath } from './uploads';
diff --git a/src/resources/agents/datasets/evaluate.ts b/src/resources/agents/datasets/evaluate.ts
index c413027..511ce10 100644
--- a/src/resources/agents/datasets/evaluate.ts
+++ b/src/resources/agents/datasets/evaluate.ts
@@ -9,20 +9,20 @@ import { type Response } from '../../../_shims/index';
export class Evaluate extends APIResource {
/**
* Create a new evaluation `Dataset` for the specified `Agent` using the provided
- * JSONL file. A `Dataset` is a versioned collection of samples conforming to a
- * particular schema, and can be used to store `Evaluation` test-sets and retrieve
- * `Evaluation` results.
+ * JSONL or CSV file. A `Dataset` is a versioned collection of samples conforming
+ * to a particular schema, and can be used to store `Evaluation` test-sets and
+ * retrieve `Evaluation` results.
*
* Each `Dataset` is versioned and validated against its schema during creation and
* subsequent updates. The provided `Dataset` file must conform to the schema
* defined for the `dataset_type`.
*
- * File schema for `dataset_type` `evaluation_set` is a JSONL or CSV file where
- * each line is one JSON object with the following required keys:
+ * File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
+ * where each line is one JSON object. The following keys are required:
*
- * - `prompt` (required, `string`): Prompt or question
+ * - `prompt` (`string`): Prompt or question
*
- * - `reference` (required, `string`): Required reference or ground truth response
+ * - `reference` (`string`): Reference or ground truth response
*/
create(
agentId: string,
@@ -80,12 +80,12 @@ export class Evaluate extends APIResource {
* Create a new version of the dataset by appending content to the `Dataset` and
* validating against its schema.
*
- * File schema for `dataset_type` `evaluation_set` is a JSONL file where each line
- * is one JSON object with the following required keys:
+ * File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
+ * where each line is one JSON object. The following keys are required:
*
* - `prompt` (`string`): Prompt or question
*
- * - `reference` (`string`): Required reference or ground truth response
+ * - `reference` (`string`): Reference or ground truth response
*/
update(
agentId: string,
@@ -184,7 +184,7 @@ export interface EvaluateCreateParams {
dataset_type: 'evaluation_set';
/**
- * JSONL file containing the evaluation dataset
+ * JSONL or CSV file containing the evaluation dataset
*/
file: Core.Uploadable;
}
@@ -210,7 +210,7 @@ export interface EvaluateUpdateParams {
dataset_type: 'evaluation_set';
/**
- * JSONL file containing the entries to append to the evaluation dataset
+ * JSONL or CSV file containing the entries to append to the evaluation dataset
*/
file: Core.Uploadable;
}
diff --git a/src/resources/agents/evaluate/evaluate.ts b/src/resources/agents/evaluate/evaluate.ts
index 2c0bfb3..efb2917 100644
--- a/src/resources/agents/evaluate/evaluate.ts
+++ b/src/resources/agents/evaluate/evaluate.ts
@@ -72,7 +72,7 @@ export interface EvaluateCreateParams {
/**
* ID of the model to evaluate. Uses the default model if not specified.
*/
- llm_model_id?: string | null;
+ llm_model_id?: string;
}
Evaluate.Jobs = Jobs;
diff --git a/src/resources/agents/query.ts b/src/resources/agents/query.ts
index 07bfc88..0aa82f7 100644
--- a/src/resources/agents/query.ts
+++ b/src/resources/agents/query.ts
@@ -190,7 +190,7 @@ export namespace QueryResponse {
content: string;
/**
- * Role of sender
+ * Role of the sender
*/
role: 'user' | 'system' | 'assistant';
}
@@ -301,7 +301,8 @@ export interface QueryCreateParams {
export namespace QueryCreateParams {
/**
- * Message object for a message sent or received in a /query conversation
+ * Message object for a message sent or received in a /query and /generate
+ * conversation
*/
export interface Message {
/**
@@ -310,7 +311,7 @@ export namespace QueryCreateParams {
content: string;
/**
- * Role of sender
+ * Role of the sender
*/
role: 'user' | 'system' | 'assistant';
}
diff --git a/src/resources/agents/tune/tune.ts b/src/resources/agents/tune/tune.ts
index e2e5fc6..998b74a 100644
--- a/src/resources/agents/tune/tune.ts
+++ b/src/resources/agents/tune/tune.ts
@@ -57,21 +57,21 @@ export interface TuneCreateParams {
* JSON object represents a single training example. The four required fields are
* `guideline`, `prompt`, `reference`, and `knowledge`.
*
- * - `knowledge` (`list[str]`): Knowledge or retrievals used to generate the
- * reference response, as a list of string text chunks
+ * - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
+ * answer. `knowledge` is a list of retrieved text chunks.
*
- * - `reference` field should be the model's response to the prompt.
+ * - `reference` (`str`): The gold-standard answer to the prompt.
*
- * - `guideline` (`str): Guidelines or criteria for model output
+ * - `guideline` (`str`): Guidelines for model output.
*
- * - `prompt` (required, `string`): Prompt or question model should respond to.
+ * - `prompt` (`str`): Question for the model to respond to.
*
* Example:
*
* ```json
* [
* {
- * "guideline": "The response should be accurate.",
+ * "guideline": "The answer should be accurate.",
* "prompt": "What was last quarter's revenue?",
* "reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
* "knowledge": [
diff --git a/src/resources/generate.ts b/src/resources/generate.ts
new file mode 100644
index 0000000..cb4f5db
--- /dev/null
+++ b/src/resources/generate.ts
@@ -0,0 +1,86 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../resource';
+import * as Core from '../core';
+
+export class Generate extends APIResource {
+ /**
+ * Generate a response using Contextual's Grounded Language Model (GLM), an LLM
+ * engineered specifically to prioritize faithfulness to in-context retrievals over
+ * parametric knowledge to reduce hallucinations in Retrieval-Augmented Generation.
+ *
+ * The total request cannot exceed 6,100 tokens.
+ */
+  create(body: GenerateCreateParams, options?: Core.RequestOptions): Core.APIPromise<GenerateCreateResponse> {
+ return this._client.post('/generate', { body, ...options });
+ }
+}
+
+/**
+ * /generate result object.
+ */
+export interface GenerateCreateResponse {
+ /**
+ * The model's response to the last user message.
+ */
+ response: string;
+}
+
+export interface GenerateCreateParams {
+ /**
+ * Extra parameters to be passed to Contextual's GLM
+ */
+ extra_body: GenerateCreateParams.ExtraBody;
+
+ /**
+ * List of messages in the conversation so far. The last message must be from the
+ * user.
+ */
+  messages: Array<GenerateCreateParams.Message>;
+
+ /**
+ * The version of the Contextual's GLM to use. Currently, we just have "v1".
+ */
+ model: string;
+}
+
+export namespace GenerateCreateParams {
+ /**
+ * Extra parameters to be passed to Contextual's GLM
+ */
+ export interface ExtraBody {
+ /**
+ * The knowledge sources the model can use when generating a response.
+ */
+    knowledge: Array<string>;
+
+ /**
+ * Instructions that the model follows when generating responses. Note that we do
+ * not guarantee that the model follows these instructions exactly.
+ */
+ system_prompt?: string;
+ }
+
+ /**
+ * Message object for a message sent or received in a /query and /generate
+ * conversation
+ */
+ export interface Message {
+ /**
+ * Content of the message
+ */
+ content: string;
+
+ /**
+ * Role of the sender
+ */
+ role: 'user' | 'system' | 'assistant';
+ }
+}
+
+export declare namespace Generate {
+ export {
+ type GenerateCreateResponse as GenerateCreateResponse,
+ type GenerateCreateParams as GenerateCreateParams,
+ };
+}
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 0ccc44b..2036b1b 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -24,4 +24,6 @@ export {
type DatastoreCreateParams,
type DatastoreListParams,
} from './datastores/datastores';
+export { Generate, type GenerateCreateResponse, type GenerateCreateParams } from './generate';
export { LMUnit, type LMUnitCreateResponse, type LMUnitCreateParams } from './lmunit';
+export { Rerank, type RerankCreateResponse, type RerankCreateParams } from './rerank';
diff --git a/src/resources/rerank.ts b/src/resources/rerank.ts
new file mode 100644
index 0000000..c4b8bed
--- /dev/null
+++ b/src/resources/rerank.ts
@@ -0,0 +1,75 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../resource';
+import * as Core from '../core';
+
+export class Rerank extends APIResource {
+ /**
+ * Rank a list of documents according to their relevance to a query.
+ *
+ * The total request cannot exceed 400,000 tokens. The combined length of any
+ * document and the query must not exceed 4,000 tokens.
+ */
+  create(body: RerankCreateParams, options?: Core.RequestOptions): Core.APIPromise<RerankCreateResponse> {
+ return this._client.post('/rerank', { body, ...options });
+ }
+}
+
+/**
+ * Rerank output response.
+ */
+export interface RerankCreateResponse {
+ /**
+ * The ranked list of documents containing the index of the document and the
+ * relevance score, sorted by relevance score.
+ */
+  results: Array<RerankCreateResponse.Result>;
+}
+
+export namespace RerankCreateResponse {
+ /**
+ * Reranked result object.
+ */
+ export interface Result {
+ /**
+ * Index of the document in the input list, starting with 0
+ */
+ index: number;
+
+ /**
+ * Relevance scores assess how likely a document is to have information that is
+ * helpful to answer the query. Our model outputs the scores in a wide range, and
+ * we normalize scores to a 0-1 scale and truncate the response to 8 decimal
+ * places. Our reranker is designed for RAG, so its purpose is to check whether a
+ * document has information that is helpful to answer the query. A reranker that is
+ * designed for direct Q&A (Question & Answer) would behave differently.
+ */
+ relevance_score: number;
+ }
+}
+
+export interface RerankCreateParams {
+ /**
+ * The texts to be reranked according to their relevance to the query
+ */
+  documents: Array<string>;
+
+ /**
+ * The version of the reranker to use. Currently, we just have "v1".
+ */
+ model: string;
+
+ /**
+ * The string against which documents will be ranked for relevance
+ */
+ query: string;
+
+ /**
+ * The number of top-ranked results to return
+ */
+ top_n?: number;
+}
+
+export declare namespace Rerank {
+ export { type RerankCreateResponse as RerankCreateResponse, type RerankCreateParams as RerankCreateParams };
+}
diff --git a/tests/api-resources/generate.test.ts b/tests/api-resources/generate.test.ts
new file mode 100644
index 0000000..4e89314
--- /dev/null
+++ b/tests/api-resources/generate.test.ts
@@ -0,0 +1,34 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import ContextualAI from 'contextual-client';
+import { Response } from 'node-fetch';
+
+const client = new ContextualAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource generate', () => {
+ test('create: only required params', async () => {
+ const responsePromise = client.generate.create({
+ extra_body: { knowledge: ['string'] },
+ messages: [{ content: 'content', role: 'user' }],
+ model: 'model',
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await client.generate.create({
+ extra_body: { knowledge: ['string'], system_prompt: 'system_prompt' },
+ messages: [{ content: 'content', role: 'user' }],
+ model: 'model',
+ });
+ });
+});
diff --git a/tests/api-resources/rerank.test.ts b/tests/api-resources/rerank.test.ts
new file mode 100644
index 0000000..3b01b31
--- /dev/null
+++ b/tests/api-resources/rerank.test.ts
@@ -0,0 +1,31 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import ContextualAI from 'contextual-client';
+import { Response } from 'node-fetch';
+
+const client = new ContextualAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource rerank', () => {
+ test('create: only required params', async () => {
+ const responsePromise = client.rerank.create({ documents: ['string'], model: 'model', query: 'x' });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await client.rerank.create({
+ documents: ['string'],
+ model: 'model',
+ query: 'x',
+ top_n: 0,
+ });
+ });
+});