From eef8a87c1f4d1c57fce697103d07c8510fcc4520 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 15 Jan 2025 08:08:16 +0000
Subject: [PATCH 1/4] feat(api): update via SDK Studio (#16)
---
.stats.yml | 4 +-
README.md | 4 +-
api.md | 19 +-
src/contextual/pagination.py | 42 +
src/contextual/resources/agents/agents.py | 44 +-
.../resources/agents/datasets/__init__.py | 14 -
.../resources/agents/datasets/datasets.py | 32 -
.../resources/agents/datasets/evaluate.py | 64 +-
.../resources/agents/datasets/tune.py | 911 ------------------
.../resources/agents/evaluate/evaluate.py | 86 +-
.../resources/agents/evaluate/jobs.py | 46 +-
src/contextual/resources/agents/query.py | 82 +-
src/contextual/resources/agents/tune/jobs.py | 48 +-
.../resources/agents/tune/models.py | 4 +-
src/contextual/resources/agents/tune/tune.py | 68 +-
.../resources/datastores/datastores.py | 28 +-
src/contextual/types/agent_metadata.py | 8 +-
src/contextual/types/agent_update_params.py | 8 +-
.../types/agents/datasets/__init__.py | 5 -
.../agents/datasets/tune_create_params.py | 20 -
.../types/agents/datasets/tune_list_params.py | 15 -
.../agents/datasets/tune_metadata_params.py | 15 -
.../agents/datasets/tune_retrieve_params.py | 21 -
.../agents/datasets/tune_update_params.py | 23 -
.../types/agents/evaluate_create_params.py | 31 +-
.../types/agents/query_create_params.py | 16 +-
.../types/agents/query_feedback_params.py | 6 +-
.../types/agents/query_metrics_params.py | 4 +-
.../agents/query_retrieval_info_params.py | 2 +-
.../agents/tune/list_tune_jobs_response.py | 8 +-
.../agents/tune/list_tune_models_response.py | 8 +-
.../types/agents/tune/tune_job_metadata.py | 12 +-
.../types/agents/tune_create_params.py | 19 +-
src/contextual/types/list_agents_response.py | 2 +-
.../agents/datasets/test_tune.py | 716 --------------
tests/api_resources/agents/test_evaluate.py | 4 +-
tests/api_resources/agents/test_query.py | 4 +-
tests/api_resources/test_agents.py | 18 +-
38 files changed, 363 insertions(+), 2098 deletions(-)
delete mode 100644 src/contextual/resources/agents/datasets/tune.py
delete mode 100644 src/contextual/types/agents/datasets/tune_create_params.py
delete mode 100644 src/contextual/types/agents/datasets/tune_list_params.py
delete mode 100644 src/contextual/types/agents/datasets/tune_metadata_params.py
delete mode 100644 src/contextual/types/agents/datasets/tune_retrieve_params.py
delete mode 100644 src/contextual/types/agents/datasets/tune_update_params.py
delete mode 100644 tests/api_resources/agents/datasets/test_tune.py
diff --git a/.stats.yml b/.stats.yml
index a46d9c6..f83abfd 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
-configured_endpoints: 39
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-478f305e228bf07625e8d1166c8a8d2a38c532d27012ce7ed906d0a3728f26f9.yml
+configured_endpoints: 33
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-4ed32c3243ce7a772e55bb1ba204736fc3fb1d712d8ca0eb91bac0c7ac626938.yml
diff --git a/README.md b/README.md
index 6676409..feb05fc 100644
--- a/README.md
+++ b/README.md
@@ -121,7 +121,7 @@ first_page = await client.agents.list()
if first_page.has_next_page():
print(f"will fetch next page using these details: {first_page.next_page_info()}")
next_page = await first_page.get_next_page()
- print(f"number of items we just fetched: {len(next_page.data)}")
+ print(f"number of items we just fetched: {len(next_page.agents)}")
# Remove `await` for non-async usage.
```
@@ -132,7 +132,7 @@ Or just work directly with the returned data:
first_page = await client.agents.list()
print(f"next page cursor: {first_page.next_cursor}") # => "next page cursor: ..."
-for agent in first_page.data:
+for agent in first_page.agents:
print(agent.id)
# Remove `await` for non-async usage.
diff --git a/api.md b/api.md
index 0c6a7ea..e479a2d 100644
--- a/api.md
+++ b/api.md
@@ -58,7 +58,7 @@ Methods:
- client.agents.create(\*\*params) -> CreateAgentOutput
- client.agents.update(agent_id, \*\*params) -> object
-- client.agents.list(\*\*params) -> SyncPage[Agent]
+- client.agents.list(\*\*params) -> SyncAgentsPage[Agent]
- client.agents.delete(agent_id) -> object
- client.agents.metadata(agent_id) -> AgentMetadata
@@ -120,23 +120,6 @@ Types:
from contextual.types.agents import CreateDatasetResponse, DatasetMetadata, ListDatasetsResponse
```
-### Tune
-
-Types:
-
-```python
-from contextual.types.agents.datasets import TuneDeleteResponse
-```
-
-Methods:
-
-- client.agents.datasets.tune.create(agent_id, \*\*params) -> CreateDatasetResponse
-- client.agents.datasets.tune.retrieve(dataset_name, \*, agent_id, \*\*params) -> BinaryAPIResponse
-- client.agents.datasets.tune.update(dataset_name, \*, agent_id, \*\*params) -> CreateDatasetResponse
-- client.agents.datasets.tune.list(agent_id, \*\*params) -> ListDatasetsResponse
-- client.agents.datasets.tune.delete(dataset_name, \*, agent_id) -> object
-- client.agents.datasets.tune.metadata(dataset_name, \*, agent_id, \*\*params) -> DatasetMetadata
-
### Evaluate
Types:
diff --git a/src/contextual/pagination.py b/src/contextual/pagination.py
index 20f31aa..13e3dcb 100644
--- a/src/contextual/pagination.py
+++ b/src/contextual/pagination.py
@@ -10,6 +10,8 @@
"AsyncDatastoresPage",
"SyncDocumentsPage",
"AsyncDocumentsPage",
+ "SyncAgentsPage",
+ "AsyncAgentsPage",
"SyncPage",
"AsyncPage",
]
@@ -97,6 +99,46 @@ def next_page_info(self) -> Optional[PageInfo]:
return PageInfo(params={"cursor": next_cursor})
+class SyncAgentsPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
+ agents: List[_T]
+ next_cursor: Optional[str] = None
+
+ @override
+ def _get_page_items(self) -> List[_T]:
+ agents = self.agents
+ if not agents:
+ return []
+ return agents
+
+ @override
+ def next_page_info(self) -> Optional[PageInfo]:
+ next_cursor = self.next_cursor
+ if not next_cursor:
+ return None
+
+ return PageInfo(params={"cursor": next_cursor})
+
+
+class AsyncAgentsPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
+ agents: List[_T]
+ next_cursor: Optional[str] = None
+
+ @override
+ def _get_page_items(self) -> List[_T]:
+ agents = self.agents
+ if not agents:
+ return []
+ return agents
+
+ @override
+ def next_page_info(self) -> Optional[PageInfo]:
+ next_cursor = self.next_cursor
+ if not next_cursor:
+ return None
+
+ return PageInfo(params={"cursor": next_cursor})
+
+
class SyncPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
data: List[_T]
next_cursor: Optional[str] = None
diff --git a/src/contextual/resources/agents/agents.py b/src/contextual/resources/agents/agents.py
index 1bb15c0..bea8eda 100644
--- a/src/contextual/resources/agents/agents.py
+++ b/src/contextual/resources/agents/agents.py
@@ -36,7 +36,7 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from ...pagination import SyncPage, AsyncPage
+from ...pagination import SyncAgentsPage, AsyncAgentsPage
from ...types.agent import Agent
from ..._base_client import AsyncPaginator, make_request_options
from .datasets.datasets import (
@@ -115,8 +115,8 @@ def create(
"""
Create a new `Agent` with a specific configuration.
- This creates a specialized RAG `Agent` which queries over a `Datastore` to
- retrieve relevant data on which its generations are grounded.
+ This creates a specialized RAG `Agent` which queries over one or multiple
+ `Datastores` to retrieve relevant data on which its generations are grounded.
Retrieval and generation parameters are defined in the provided `Agent`
configuration.
@@ -188,14 +188,13 @@ def update(
Fields not included in the request body will not be modified.
Args:
- agent_id: Agent ID of the agent to edit
+ agent_id: ID of the agent to edit
datastore_ids: IDs of the datastore to associate with the agent.
- llm_model_id: Optional model ID of a tuned model to use for generation. Model must have been
- tuned on this agent; tuned models cannot be used across agents. Uses default
- model if none is specified. Set to `default` to deactivate the tuned model and
- use the default model.
+ llm_model_id: The model ID to use for generation. Tuned models can only be used for the agents
+ on which they were tuned. If no model is specified, the default model is used.
+ Set to `default` to switch from a tuned model to the default model.
suggested_queries: These queries will show up as suggestions in the Contextual UI when users load
the agent. We recommend including common queries that users will ask, as well as
@@ -243,7 +242,7 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SyncPage[Agent]:
+ ) -> SyncAgentsPage[Agent]:
"""
Retrieve a list of all `Agents`.
@@ -263,7 +262,7 @@ def list(
"""
return self._get_api_list(
"/agents",
- page=SyncPage[Agent],
+ page=SyncAgentsPage[Agent],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -300,7 +299,7 @@ def delete(
`DELETE /datastores/{datastore_id}` API.
Args:
- agent_id: Agent ID of the agent to delete
+ agent_id: ID of the agent to delete
extra_headers: Send extra headers
@@ -335,7 +334,7 @@ def metadata(
Get metadata and configuration of a given `Agent`.
Args:
- agent_id: Agent ID of the agent to retrieve details for
+ agent_id: ID of the agent for which to retrieve details
extra_headers: Send extra headers
@@ -410,8 +409,8 @@ async def create(
"""
Create a new `Agent` with a specific configuration.
- This creates a specialized RAG `Agent` which queries over a `Datastore` to
- retrieve relevant data on which its generations are grounded.
+ This creates a specialized RAG `Agent` which queries over one or multiple
+ `Datastores` to retrieve relevant data on which its generations are grounded.
Retrieval and generation parameters are defined in the provided `Agent`
configuration.
@@ -483,14 +482,13 @@ async def update(
Fields not included in the request body will not be modified.
Args:
- agent_id: Agent ID of the agent to edit
+ agent_id: ID of the agent to edit
datastore_ids: IDs of the datastore to associate with the agent.
- llm_model_id: Optional model ID of a tuned model to use for generation. Model must have been
- tuned on this agent; tuned models cannot be used across agents. Uses default
- model if none is specified. Set to `default` to deactivate the tuned model and
- use the default model.
+ llm_model_id: The model ID to use for generation. Tuned models can only be used for the agents
+ on which they were tuned. If no model is specified, the default model is used.
+ Set to `default` to switch from a tuned model to the default model.
suggested_queries: These queries will show up as suggestions in the Contextual UI when users load
the agent. We recommend including common queries that users will ask, as well as
@@ -538,7 +536,7 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncPaginator[Agent, AsyncPage[Agent]]:
+ ) -> AsyncPaginator[Agent, AsyncAgentsPage[Agent]]:
"""
Retrieve a list of all `Agents`.
@@ -558,7 +556,7 @@ def list(
"""
return self._get_api_list(
"/agents",
- page=AsyncPage[Agent],
+ page=AsyncAgentsPage[Agent],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -595,7 +593,7 @@ async def delete(
`DELETE /datastores/{datastore_id}` API.
Args:
- agent_id: Agent ID of the agent to delete
+ agent_id: ID of the agent to delete
extra_headers: Send extra headers
@@ -630,7 +628,7 @@ async def metadata(
Get metadata and configuration of a given `Agent`.
Args:
- agent_id: Agent ID of the agent to retrieve details for
+ agent_id: ID of the agent for which to retrieve details
extra_headers: Send extra headers
diff --git a/src/contextual/resources/agents/datasets/__init__.py b/src/contextual/resources/agents/datasets/__init__.py
index 059bd75..c97b011 100644
--- a/src/contextual/resources/agents/datasets/__init__.py
+++ b/src/contextual/resources/agents/datasets/__init__.py
@@ -1,13 +1,5 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from .tune import (
- TuneResource,
- AsyncTuneResource,
- TuneResourceWithRawResponse,
- AsyncTuneResourceWithRawResponse,
- TuneResourceWithStreamingResponse,
- AsyncTuneResourceWithStreamingResponse,
-)
from .datasets import (
DatasetsResource,
AsyncDatasetsResource,
@@ -26,12 +18,6 @@
)
__all__ = [
- "TuneResource",
- "AsyncTuneResource",
- "TuneResourceWithRawResponse",
- "AsyncTuneResourceWithRawResponse",
- "TuneResourceWithStreamingResponse",
- "AsyncTuneResourceWithStreamingResponse",
"EvaluateResource",
"AsyncEvaluateResource",
"EvaluateResourceWithRawResponse",
diff --git a/src/contextual/resources/agents/datasets/datasets.py b/src/contextual/resources/agents/datasets/datasets.py
index 986775f..ba9a5bc 100644
--- a/src/contextual/resources/agents/datasets/datasets.py
+++ b/src/contextual/resources/agents/datasets/datasets.py
@@ -2,14 +2,6 @@
from __future__ import annotations
-from .tune import (
- TuneResource,
- AsyncTuneResource,
- TuneResourceWithRawResponse,
- AsyncTuneResourceWithRawResponse,
- TuneResourceWithStreamingResponse,
- AsyncTuneResourceWithStreamingResponse,
-)
from .evaluate import (
EvaluateResource,
AsyncEvaluateResource,
@@ -25,10 +17,6 @@
class DatasetsResource(SyncAPIResource):
- @cached_property
- def tune(self) -> TuneResource:
- return TuneResource(self._client)
-
@cached_property
def evaluate(self) -> EvaluateResource:
return EvaluateResource(self._client)
@@ -54,10 +42,6 @@ def with_streaming_response(self) -> DatasetsResourceWithStreamingResponse:
class AsyncDatasetsResource(AsyncAPIResource):
- @cached_property
- def tune(self) -> AsyncTuneResource:
- return AsyncTuneResource(self._client)
-
@cached_property
def evaluate(self) -> AsyncEvaluateResource:
return AsyncEvaluateResource(self._client)
@@ -86,10 +70,6 @@ class DatasetsResourceWithRawResponse:
def __init__(self, datasets: DatasetsResource) -> None:
self._datasets = datasets
- @cached_property
- def tune(self) -> TuneResourceWithRawResponse:
- return TuneResourceWithRawResponse(self._datasets.tune)
-
@cached_property
def evaluate(self) -> EvaluateResourceWithRawResponse:
return EvaluateResourceWithRawResponse(self._datasets.evaluate)
@@ -99,10 +79,6 @@ class AsyncDatasetsResourceWithRawResponse:
def __init__(self, datasets: AsyncDatasetsResource) -> None:
self._datasets = datasets
- @cached_property
- def tune(self) -> AsyncTuneResourceWithRawResponse:
- return AsyncTuneResourceWithRawResponse(self._datasets.tune)
-
@cached_property
def evaluate(self) -> AsyncEvaluateResourceWithRawResponse:
return AsyncEvaluateResourceWithRawResponse(self._datasets.evaluate)
@@ -112,10 +88,6 @@ class DatasetsResourceWithStreamingResponse:
def __init__(self, datasets: DatasetsResource) -> None:
self._datasets = datasets
- @cached_property
- def tune(self) -> TuneResourceWithStreamingResponse:
- return TuneResourceWithStreamingResponse(self._datasets.tune)
-
@cached_property
def evaluate(self) -> EvaluateResourceWithStreamingResponse:
return EvaluateResourceWithStreamingResponse(self._datasets.evaluate)
@@ -125,10 +97,6 @@ class AsyncDatasetsResourceWithStreamingResponse:
def __init__(self, datasets: AsyncDatasetsResource) -> None:
self._datasets = datasets
- @cached_property
- def tune(self) -> AsyncTuneResourceWithStreamingResponse:
- return AsyncTuneResourceWithStreamingResponse(self._datasets.tune)
-
@cached_property
def evaluate(self) -> AsyncEvaluateResourceWithStreamingResponse:
return AsyncEvaluateResourceWithStreamingResponse(self._datasets.evaluate)
diff --git a/src/contextual/resources/agents/datasets/evaluate.py b/src/contextual/resources/agents/datasets/evaluate.py
index d0facfd..fdf4c21 100644
--- a/src/contextual/resources/agents/datasets/evaluate.py
+++ b/src/contextual/resources/agents/datasets/evaluate.py
@@ -90,19 +90,12 @@ def create(
defined for the `dataset_type`.
File schema for `dataset_type` `evaluation_set` is a JSONL or CSV file where
- each line is one JSON object with the following keys:
+ each line is one JSON object with the following required keys:
- `prompt` (required, `string`): Prompt or question
- - `response` (optional, `string`): Optional response to evaluate
-
- `reference` (required, `string`): Required reference or ground truth response
- - `guideline` (optional, `string`): Optional evaluation guidelines
-
- - `knowledge` (optional, `string`): Optional retrieved context for evaluation,
- as a list of string text chunks
-
Args:
agent_id: Agent ID to associate with the evaluation dataset
@@ -166,9 +159,13 @@ def retrieve(
The `Dataset` content is downloaded in batches. Batch size can be configured to
meet specific processing requirements.
- Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content
- with: - Content-Type: application/octet-stream - Content-Disposition:
- attachment - Chunked transfer encoding
+ Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content with:
+
+ - Content-Type: application/octet-stream
+
+ - Content-Disposition: attachment
+
+ - Chunked transfer encoding
Args:
agent_id: Agent ID associated with the evaluation dataset
@@ -232,18 +229,11 @@ def update(
validating against its schema.
File schema for `dataset_type` `evaluation_set` is a JSONL file where each line
- is one JSON object with the following keys:
-
- - `prompt` (required, `string`): Prompt or question
-
- - `response` (optional, `string`): Optional response to evaluate
-
- - `reference` (required, `string`): Required reference or ground truth response
+ is one JSON object with the following required keys:
- - `guideline` (optional, `string`): Optional evaluation guidelines
+ - `prompt` (`string`): Prompt or question
- - `knowledge` (optional, `string`): Optional retrieved context for evaluation,
- as a list of string text chunks
+ - `reference` (`string`): Required reference or ground truth response
Args:
agent_id: Agent ID associated with the evaluation dataset
@@ -478,19 +468,12 @@ async def create(
defined for the `dataset_type`.
File schema for `dataset_type` `evaluation_set` is a JSONL or CSV file where
- each line is one JSON object with the following keys:
+ each line is one JSON object with the following required keys:
- `prompt` (required, `string`): Prompt or question
- - `response` (optional, `string`): Optional response to evaluate
-
- `reference` (required, `string`): Required reference or ground truth response
- - `guideline` (optional, `string`): Optional evaluation guidelines
-
- - `knowledge` (optional, `string`): Optional retrieved context for evaluation,
- as a list of string text chunks
-
Args:
agent_id: Agent ID to associate with the evaluation dataset
@@ -554,9 +537,13 @@ async def retrieve(
The `Dataset` content is downloaded in batches. Batch size can be configured to
meet specific processing requirements.
- Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content
- with: - Content-Type: application/octet-stream - Content-Disposition:
- attachment - Chunked transfer encoding
+ Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content with:
+
+ - Content-Type: application/octet-stream
+
+ - Content-Disposition: attachment
+
+ - Chunked transfer encoding
Args:
agent_id: Agent ID associated with the evaluation dataset
@@ -620,18 +607,11 @@ async def update(
validating against its schema.
File schema for `dataset_type` `evaluation_set` is a JSONL file where each line
- is one JSON object with the following keys:
-
- - `prompt` (required, `string`): Prompt or question
-
- - `response` (optional, `string`): Optional response to evaluate
-
- - `reference` (required, `string`): Required reference or ground truth response
+ is one JSON object with the following required keys:
- - `guideline` (optional, `string`): Optional evaluation guidelines
+ - `prompt` (`string`): Prompt or question
- - `knowledge` (optional, `string`): Optional retrieved context for evaluation,
- as a list of string text chunks
+ - `reference` (`string`): Required reference or ground truth response
Args:
agent_id: Agent ID associated with the evaluation dataset
diff --git a/src/contextual/resources/agents/datasets/tune.py b/src/contextual/resources/agents/datasets/tune.py
deleted file mode 100644
index 0653750..0000000
--- a/src/contextual/resources/agents/datasets/tune.py
+++ /dev/null
@@ -1,911 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Mapping, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from ...._utils import (
- extract_files,
- maybe_transform,
- deepcopy_minimal,
- async_maybe_transform,
-)
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- BinaryAPIResponse,
- AsyncBinaryAPIResponse,
- StreamedBinaryAPIResponse,
- AsyncStreamedBinaryAPIResponse,
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- to_custom_raw_response_wrapper,
- async_to_streamed_response_wrapper,
- to_custom_streamed_response_wrapper,
- async_to_custom_raw_response_wrapper,
- async_to_custom_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.agents.datasets import (
- tune_list_params,
- tune_create_params,
- tune_update_params,
- tune_metadata_params,
- tune_retrieve_params,
-)
-from ....types.agents.dataset_metadata import DatasetMetadata
-from ....types.agents.list_datasets_response import ListDatasetsResponse
-from ....types.agents.create_dataset_response import CreateDatasetResponse
-
-__all__ = ["TuneResource", "AsyncTuneResource"]
-
-
-class TuneResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> TuneResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return the
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers
- """
- return TuneResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> TuneResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response
- """
- return TuneResourceWithStreamingResponse(self)
-
- def create(
- self,
- agent_id: str,
- *,
- dataset_name: str,
- dataset_type: Literal["tuning_set"],
- file: FileTypes,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateDatasetResponse:
- """
- Create a new tuning `Dataset` for the specified `Agent` using the provided JSONL
- file. A `Dataset` is a versioned collection of samples conforming to a
- particular schema, and can be used as a source of training and test data for
- tuning jobs.
-
- Each `Dataset` is versioned and validated against its schema during creation and
- subsequent updates. The provided `Dataset` file must conform to the schema
- defined for the `dataset_type`.
-
- File schema for `dataset_type` `tuning_set` is a JSONL file where each line is
- one JSON object with the following keys:
-
- - `prompt` (required, `string`): Prompt or question
-
- - `reference` (required, `string`): Reference or ground truth response
-
- - `guideline` (required, `string`): Guidelines or criteria for model output
-
- - `knowledge` (required, `string`): Knowledge or retrievals used to generate the
- reference response, as a list of string text chunks
-
- Args:
- agent_id: Agent ID to associate with the tune dataset
-
- dataset_name: Name of the tune dataset
-
- dataset_type: Type of tune dataset which determines its schema and validation rules.
-
- file: JSONL file containing the tune dataset
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- body = deepcopy_minimal(
- {
- "dataset_name": dataset_name,
- "dataset_type": dataset_type,
- "file": file,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- f"/agents/{agent_id}/datasets/tune",
- body=maybe_transform(body, tune_create_params.TuneCreateParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateDatasetResponse,
- )
-
- def retrieve(
- self,
- dataset_name: str,
- *,
- agent_id: str,
- batch_size: int | NotGiven = NOT_GIVEN,
- version: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> BinaryAPIResponse:
- """Stream the raw content of a tuning `Dataset` version.
-
- If no version is
- specified, the latest version is used.
-
- The `Dataset` content is downloaded in batches. Batch size can be configured to
- meet specific processing requirements.
-
- Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content
- with: - Content-Type: application/octet-stream - Content-Disposition:
- attachment - Chunked transfer encoding
-
- Args:
- agent_id: Agent ID associated with the tune dataset
-
- dataset_name: Name of the tune dataset to retrieve
-
- batch_size: Batch size for processing
-
- version: Version number of the tune dataset to retrieve. Defaults to the latest version
- if not specified.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- if not dataset_name:
- raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
- extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
- return self._get(
- f"/agents/{agent_id}/datasets/tune/{dataset_name}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "batch_size": batch_size,
- "version": version,
- },
- tune_retrieve_params.TuneRetrieveParams,
- ),
- ),
- cast_to=BinaryAPIResponse,
- )
-
- def update(
- self,
- dataset_name: str,
- *,
- agent_id: str,
- dataset_type: Literal["tuning_set"],
- file: FileTypes,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateDatasetResponse:
- """
- Append to an existing tuning `Dataset`.
-
- Create a new version of the dataset by appending content to the `Dataset` and
- validating against its schema.
-
- File schema for `dataset_type` `tuning_set` is a JSONL file where each line is
- one JSON object with the following keys:
-
- - `prompt` (required, `string`): Prompt or question
-
- - `reference` (required, `string`): Reference or ground truth response
-
- - `guideline` (required, `string`): Guidelines or criteria for model output
-
- - `knowledge` (required, `string`): Knowledge or retrievals used to generate the
- reference response, as a list of string text chunks
-
- Args:
- agent_id: Agent ID associated with the tune dataset
-
- dataset_name: Name of the tune dataset to append to
-
- dataset_type: Type of tune dataset which determines its schema and validation rules. Must
- match the `dataset_type` used at dataset creation time.
-
- file: JSONL file containing the entries to append to the tune dataset
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- if not dataset_name:
- raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
- body = deepcopy_minimal(
- {
- "dataset_type": dataset_type,
- "file": file,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._put(
- f"/agents/{agent_id}/datasets/tune/{dataset_name}",
- body=maybe_transform(body, tune_update_params.TuneUpdateParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateDatasetResponse,
- )
-
- def list(
- self,
- agent_id: str,
- *,
- dataset_name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListDatasetsResponse:
- """
- List all tuning `Datasets` and their versions belonging to a particular `Agent`.
-
- If a `dataset_name` filter is provided, all versions of that `Dataset` will be
- listed.
-
- Includes metadata and schema for each `Dataset` version.
-
- Args:
- agent_id: Agent ID for which to list associated evaluation datasets
-
- dataset_name: Optional dataset name to filter the results by. If provided, only versions from
- that dataset are listed.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- return self._get(
- f"/agents/{agent_id}/datasets/tune",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform({"dataset_name": dataset_name}, tune_list_params.TuneListParams),
- ),
- cast_to=ListDatasetsResponse,
- )
-
- def delete(
- self,
- dataset_name: str,
- *,
- agent_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> object:
- """
- Delete a tuning `Dataset` and all its versions.
-
- Permanently removes the `Dataset`, including all associated metadata.
-
- This operation is irreversible.
-
- Args:
- agent_id: Agent ID associated with the tune dataset
-
- dataset_name: Name of the tune dataset to delete
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- if not dataset_name:
- raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
- return self._delete(
- f"/agents/{agent_id}/datasets/tune/{dataset_name}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=object,
- )
-
- def metadata(
- self,
- dataset_name: str,
- *,
- agent_id: str,
- version: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> DatasetMetadata:
- """
- Retrieve details of a specific tuning `Dataset` version, or the latest version
- if no `version` is specified.
-
- Provides comprehensive information about the `Dataset`, including its metadata
- and schema.
-
- Args:
- agent_id: Agent ID associated with the tune dataset
-
- dataset_name: Name of the tune dataset to retrieve details for
-
- version: Version number of the dataset. Defaults to the latest version if not specified.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- if not dataset_name:
- raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
- return self._get(
- f"/agents/{agent_id}/datasets/tune/{dataset_name}/metadata",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform({"version": version}, tune_metadata_params.TuneMetadataParams),
- ),
- cast_to=DatasetMetadata,
- )
-
-
-class AsyncTuneResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncTuneResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return the
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncTuneResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncTuneResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response
- """
- return AsyncTuneResourceWithStreamingResponse(self)
-
- async def create(
- self,
- agent_id: str,
- *,
- dataset_name: str,
- dataset_type: Literal["tuning_set"],
- file: FileTypes,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateDatasetResponse:
- """
- Create a new tuning `Dataset` for the specified `Agent` using the provided JSONL
- file. A `Dataset` is a versioned collection of samples conforming to a
- particular schema, and can be used as a source of training and test data for
- tuning jobs.
-
- Each `Dataset` is versioned and validated against its schema during creation and
- subsequent updates. The provided `Dataset` file must conform to the schema
- defined for the `dataset_type`.
-
- File schema for `dataset_type` `tuning_set` is a JSONL file where each line is
- one JSON object with the following keys:
-
- - `prompt` (required, `string`): Prompt or question
-
- - `reference` (required, `string`): Reference or ground truth response
-
- - `guideline` (required, `string`): Guidelines or criteria for model output
-
- - `knowledge` (required, `string`): Knowledge or retrievals used to generate the
- reference response, as a list of string text chunks
-
- Args:
- agent_id: Agent ID to associate with the tune dataset
-
- dataset_name: Name of the tune dataset
-
- dataset_type: Type of tune dataset which determines its schema and validation rules.
-
- file: JSONL file containing the tune dataset
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- body = deepcopy_minimal(
- {
- "dataset_name": dataset_name,
- "dataset_type": dataset_type,
- "file": file,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- f"/agents/{agent_id}/datasets/tune",
- body=await async_maybe_transform(body, tune_create_params.TuneCreateParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateDatasetResponse,
- )
-
- async def retrieve(
- self,
- dataset_name: str,
- *,
- agent_id: str,
- batch_size: int | NotGiven = NOT_GIVEN,
- version: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncBinaryAPIResponse:
- """Stream the raw content of a tuning `Dataset` version.
-
- If no version is
- specified, the latest version is used.
-
- The `Dataset` content is downloaded in batches. Batch size can be configured to
- meet specific processing requirements.
-
- Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content
- with: - Content-Type: application/octet-stream - Content-Disposition:
- attachment - Chunked transfer encoding
-
- Args:
- agent_id: Agent ID associated with the tune dataset
-
- dataset_name: Name of the tune dataset to retrieve
-
- batch_size: Batch size for processing
-
- version: Version number of the tune dataset to retrieve. Defaults to the latest version
- if not specified.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- if not dataset_name:
- raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
- extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
- return await self._get(
- f"/agents/{agent_id}/datasets/tune/{dataset_name}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "batch_size": batch_size,
- "version": version,
- },
- tune_retrieve_params.TuneRetrieveParams,
- ),
- ),
- cast_to=AsyncBinaryAPIResponse,
- )
-
- async def update(
- self,
- dataset_name: str,
- *,
- agent_id: str,
- dataset_type: Literal["tuning_set"],
- file: FileTypes,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateDatasetResponse:
- """
- Append to an existing tuning `Dataset`.
-
- Create a new version of the dataset by appending content to the `Dataset` and
- validating against its schema.
-
- File schema for `dataset_type` `tuning_set` is a JSONL file where each line is
- one JSON object with the following keys:
-
- - `prompt` (required, `string`): Prompt or question
-
- - `reference` (required, `string`): Reference or ground truth response
-
- - `guideline` (required, `string`): Guidelines or criteria for model output
-
- - `knowledge` (required, `string`): Knowledge or retrievals used to generate the
- reference response, as a list of string text chunks
-
- Args:
- agent_id: Agent ID associated with the tune dataset
-
- dataset_name: Name of the tune dataset to append to
-
- dataset_type: Type of tune dataset which determines its schema and validation rules. Must
- match the `dataset_type` used at dataset creation time.
-
- file: JSONL file containing the entries to append to the tune dataset
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- if not dataset_name:
- raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
- body = deepcopy_minimal(
- {
- "dataset_type": dataset_type,
- "file": file,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._put(
- f"/agents/{agent_id}/datasets/tune/{dataset_name}",
- body=await async_maybe_transform(body, tune_update_params.TuneUpdateParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateDatasetResponse,
- )
-
- async def list(
- self,
- agent_id: str,
- *,
- dataset_name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListDatasetsResponse:
- """
- List all tuning `Datasets` and their versions belonging to a particular `Agent`.
-
- If a `dataset_name` filter is provided, all versions of that `Dataset` will be
- listed.
-
- Includes metadata and schema for each `Dataset` version.
-
- Args:
- agent_id: Agent ID for which to list associated evaluation datasets
-
- dataset_name: Optional dataset name to filter the results by. If provided, only versions from
- that dataset are listed.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- return await self._get(
- f"/agents/{agent_id}/datasets/tune",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform({"dataset_name": dataset_name}, tune_list_params.TuneListParams),
- ),
- cast_to=ListDatasetsResponse,
- )
-
- async def delete(
- self,
- dataset_name: str,
- *,
- agent_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> object:
- """
- Delete a tuning `Dataset` and all its versions.
-
- Permanently removes the `Dataset`, including all associated metadata.
-
- This operation is irreversible.
-
- Args:
- agent_id: Agent ID associated with the tune dataset
-
- dataset_name: Name of the tune dataset to delete
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- if not dataset_name:
- raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
- return await self._delete(
- f"/agents/{agent_id}/datasets/tune/{dataset_name}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=object,
- )
-
- async def metadata(
- self,
- dataset_name: str,
- *,
- agent_id: str,
- version: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> DatasetMetadata:
- """
- Retrieve details of a specific tuning `Dataset` version, or the latest version
- if no `version` is specified.
-
- Provides comprehensive information about the `Dataset`, including its metadata
- and schema.
-
- Args:
- agent_id: Agent ID associated with the tune dataset
-
- dataset_name: Name of the tune dataset to retrieve details for
-
- version: Version number of the dataset. Defaults to the latest version if not specified.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not agent_id:
- raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
- if not dataset_name:
- raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
- return await self._get(
- f"/agents/{agent_id}/datasets/tune/{dataset_name}/metadata",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform({"version": version}, tune_metadata_params.TuneMetadataParams),
- ),
- cast_to=DatasetMetadata,
- )
-
-
-class TuneResourceWithRawResponse:
- def __init__(self, tune: TuneResource) -> None:
- self._tune = tune
-
- self.create = to_raw_response_wrapper(
- tune.create,
- )
- self.retrieve = to_custom_raw_response_wrapper(
- tune.retrieve,
- BinaryAPIResponse,
- )
- self.update = to_raw_response_wrapper(
- tune.update,
- )
- self.list = to_raw_response_wrapper(
- tune.list,
- )
- self.delete = to_raw_response_wrapper(
- tune.delete,
- )
- self.metadata = to_raw_response_wrapper(
- tune.metadata,
- )
-
-
-class AsyncTuneResourceWithRawResponse:
- def __init__(self, tune: AsyncTuneResource) -> None:
- self._tune = tune
-
- self.create = async_to_raw_response_wrapper(
- tune.create,
- )
- self.retrieve = async_to_custom_raw_response_wrapper(
- tune.retrieve,
- AsyncBinaryAPIResponse,
- )
- self.update = async_to_raw_response_wrapper(
- tune.update,
- )
- self.list = async_to_raw_response_wrapper(
- tune.list,
- )
- self.delete = async_to_raw_response_wrapper(
- tune.delete,
- )
- self.metadata = async_to_raw_response_wrapper(
- tune.metadata,
- )
-
-
-class TuneResourceWithStreamingResponse:
- def __init__(self, tune: TuneResource) -> None:
- self._tune = tune
-
- self.create = to_streamed_response_wrapper(
- tune.create,
- )
- self.retrieve = to_custom_streamed_response_wrapper(
- tune.retrieve,
- StreamedBinaryAPIResponse,
- )
- self.update = to_streamed_response_wrapper(
- tune.update,
- )
- self.list = to_streamed_response_wrapper(
- tune.list,
- )
- self.delete = to_streamed_response_wrapper(
- tune.delete,
- )
- self.metadata = to_streamed_response_wrapper(
- tune.metadata,
- )
-
-
-class AsyncTuneResourceWithStreamingResponse:
- def __init__(self, tune: AsyncTuneResource) -> None:
- self._tune = tune
-
- self.create = async_to_streamed_response_wrapper(
- tune.create,
- )
- self.retrieve = async_to_custom_streamed_response_wrapper(
- tune.retrieve,
- AsyncStreamedBinaryAPIResponse,
- )
- self.update = async_to_streamed_response_wrapper(
- tune.update,
- )
- self.list = async_to_streamed_response_wrapper(
- tune.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- tune.delete,
- )
- self.metadata = async_to_streamed_response_wrapper(
- tune.metadata,
- )
diff --git a/src/contextual/resources/agents/evaluate/evaluate.py b/src/contextual/resources/agents/evaluate/evaluate.py
index de010c9..42d2232 100644
--- a/src/contextual/resources/agents/evaluate/evaluate.py
+++ b/src/contextual/resources/agents/evaluate/evaluate.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import List, Mapping, cast
+from typing import List, Mapping, Optional, cast
from typing_extensions import Literal
import httpx
@@ -68,7 +68,7 @@ def create(
metrics: List[Literal["equivalence", "groundedness"]],
evalset_file: FileTypes | NotGiven = NOT_GIVEN,
evalset_name: str | NotGiven = NOT_GIVEN,
- model_name: str | NotGiven = NOT_GIVEN,
+ llm_model_id: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -77,34 +77,38 @@ def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> CreateEvaluationResponse:
"""
- Launch an `Evaluation` round.
+ Launch an `Evaluation` job which evaluates an `Agent` on a set of test questions
+ and reference answers.
- An `Evaluation` is an asynchronous operation which evaluates an `Agent` on a set
- of test questions and reference answers. An `Evaluation` can select one or more
+ An `Evaluation` is an asynchronous operation. Users can select one or more
metrics to assess the quality of generated answers. These metrics include
- `equivalence` and `groundedness`.
+ `equivalence` and `groundedness`. `equivalence` evaluates if the Agent response
+ is equivalent to the ground truth (model-driven binary classification).
+ `groundedness` decomposes the Agent response into claims and then evaluates if
+ the claims are grounded by the retrieved documents.
- `Evaluation` test set data can be provided in one of two forms: - A CSV
- `evalset_file` containing the columns `prompt`, `reference` (i.e. gold-answers),
- and `knowledge` (optional `list[str]` of retrieved knowledge) - A `dataset_name`
- which refers to an `evaluation_set` `Dataset` created through the `Dataset` API.
+ `Evaluation` data can be provided in one of two forms:
+
+ - A CSV `evalset_file` containing the columns `prompt` (i.e. questions) and
+ `reference` (i.e. gold-answers).
+
+ - An `evalset_name` which refers to a `Dataset` created through the
+ `/datasets/evaluate` API.
Args:
agent_id: Agent ID of the agent to evaluate
metrics: List of metrics to use. Supported metrics are `equivalence` and `groundedness`.
- Use comma-separated list to pass multiple values or use repeated keys.
- evalset_file: Evalset file (CSV) to use for evaluation, containing the columns `prompt`
- (`question`), `reference` (`ground truth response`), and optional additional
- columns based on the selected metrics. Either `dataset_name` or `evalset_file`
- must be provided, but not both.
+ evalset_file: Evalset file (CSV) to use for evaluation, containing the columns `prompt` (i.e.
+ question) and `reference` (i.e. ground truth response). Either `evalset_name` or
+ `evalset_file` must be provided, but not both.
- evalset_name: Name of the dataset to use for evaluation, created through the dataset API.
- Either `dataset_name` or `evalset_file` must be provided, but not both.
+ evalset_name: Name of the Dataset to use for evaluation, created through the
+ `/datasets/evaluate` API. Either `evalset_name` or `evalset_file` must be
+ provided, but not both.
- model_name: Model name of the tuned or aligned model to use. Defaults to the default model
- if not specified.
+ llm_model_id: ID of the model to evaluate. Uses the default model if not specified.
extra_headers: Send extra headers
@@ -121,7 +125,7 @@ def create(
"metrics": metrics,
"evalset_file": evalset_file,
"evalset_name": evalset_name,
- "model_name": model_name,
+ "llm_model_id": llm_model_id,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["evalset_file"]])
@@ -171,7 +175,7 @@ async def create(
metrics: List[Literal["equivalence", "groundedness"]],
evalset_file: FileTypes | NotGiven = NOT_GIVEN,
evalset_name: str | NotGiven = NOT_GIVEN,
- model_name: str | NotGiven = NOT_GIVEN,
+ llm_model_id: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -180,34 +184,38 @@ async def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> CreateEvaluationResponse:
"""
- Launch an `Evaluation` round.
+ Launch an `Evaluation` job which evaluates an `Agent` on a set of test questions
+ and reference answers.
- An `Evaluation` is an asynchronous operation which evaluates an `Agent` on a set
- of test questions and reference answers. An `Evaluation` can select one or more
+ An `Evaluation` is an asynchronous operation. Users can select one or more
metrics to assess the quality of generated answers. These metrics include
- `equivalence` and `groundedness`.
+ `equivalence` and `groundedness`. `equivalence` evaluates if the Agent response
+ is equivalent to the ground truth (model-driven binary classification).
+ `groundedness` decomposes the Agent response into claims and then evaluates if
+ the claims are grounded by the retrieved documents.
+
+ `Evaluation` data can be provided in one of two forms:
+
+ - A CSV `evalset_file` containing the columns `prompt` (i.e. questions) and
+ `reference` (i.e. gold-answers).
- `Evaluation` test set data can be provided in one of two forms: - A CSV
- `evalset_file` containing the columns `prompt`, `reference` (i.e. gold-answers),
- and `knowledge` (optional `list[str]` of retrieved knowledge) - A `dataset_name`
- which refers to an `evaluation_set` `Dataset` created through the `Dataset` API.
+ - An `evalset_name` which refers to a `Dataset` created through the
+ `/datasets/evaluate` API.
Args:
agent_id: Agent ID of the agent to evaluate
metrics: List of metrics to use. Supported metrics are `equivalence` and `groundedness`.
- Use comma-separated list to pass multiple values or use repeated keys.
- evalset_file: Evalset file (CSV) to use for evaluation, containing the columns `prompt`
- (`question`), `reference` (`ground truth response`), and optional additional
- columns based on the selected metrics. Either `dataset_name` or `evalset_file`
- must be provided, but not both.
+ evalset_file: Evalset file (CSV) to use for evaluation, containing the columns `prompt` (i.e.
+ question) and `reference` (i.e. ground truth response). Either `evalset_name` or
+ `evalset_file` must be provided, but not both.
- evalset_name: Name of the dataset to use for evaluation, created through the dataset API.
- Either `dataset_name` or `evalset_file` must be provided, but not both.
+ evalset_name: Name of the Dataset to use for evaluation, created through the
+ `/datasets/evaluate` API. Either `evalset_name` or `evalset_file` must be
+ provided, but not both.
- model_name: Model name of the tuned or aligned model to use. Defaults to the default model
- if not specified.
+ llm_model_id: ID of the model to evaluate. Uses the default model if not specified.
extra_headers: Send extra headers
@@ -224,7 +232,7 @@ async def create(
"metrics": metrics,
"evalset_file": evalset_file,
"evalset_name": evalset_name,
- "model_name": model_name,
+ "llm_model_id": llm_model_id,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["evalset_file"]])
diff --git a/src/contextual/resources/agents/evaluate/jobs.py b/src/contextual/resources/agents/evaluate/jobs.py
index a375048..f26341e 100644
--- a/src/contextual/resources/agents/evaluate/jobs.py
+++ b/src/contextual/resources/agents/evaluate/jobs.py
@@ -52,11 +52,11 @@ def list(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ListEvaluationJobsResponse:
"""
- Retrieve a list of `Evaluation` rounds run on a given `Agent`, including the
+ Retrieve a list of `Evaluation` jobs run for a given `Agent`, including the
`Evaluation`'s status and other metadata.
Args:
- agent_id: Agent ID for which to retrieve evaluations
+ agent_id: ID of agent for which to retrieve evaluation jobs
extra_headers: Send extra headers
@@ -89,12 +89,12 @@ def cancel(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> object:
"""
- Cancels an `Evaluation` round.
+ Cancels an `Evaluation` job if it is still in progress.
Args:
agent_id: Agent ID for which to cancel the evaluation
- job_id: Evaluation round ID to cancel
+ job_id: Evaluation job ID to cancel
extra_headers: Send extra headers
@@ -128,13 +128,20 @@ def metadata(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> EvaluationJobMetadata:
- """
- Get an `Evaluation` round's status and results.
+ """Get an `Evaluation` job's status and results.
+
+ There are six possible statuses:
+ 'pending', 'processing', 'retrying', 'completed', 'failed', 'cancelled'.
+
+ If the evaluation job has completed, you will see your evaluation `metrics`,
+ `job_metadata`, and the `dataset_name` where your eval metrics and row-by-row
+ results are stored. You can use the `/datasets/evaluate` API to view the
+ specified `dataset`.
Args:
- agent_id: Agent ID for which to retrieve the evaluation
+ agent_id: ID of agent for which to retrieve evaluations
- job_id: Evaluation round ID to retrieve status and results for
+ job_id: Evaluation job ID to retrieve status and results for
extra_headers: Send extra headers
@@ -189,11 +196,11 @@ async def list(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ListEvaluationJobsResponse:
"""
- Retrieve a list of `Evaluation` rounds run on a given `Agent`, including the
+ Retrieve a list of `Evaluation` jobs run for a given `Agent`, including the
`Evaluation`'s status and other metadata.
Args:
- agent_id: Agent ID for which to retrieve evaluations
+ agent_id: ID of agent for which to retrieve evaluation jobs
extra_headers: Send extra headers
@@ -226,12 +233,12 @@ async def cancel(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> object:
"""
- Cancels an `Evaluation` round.
+ Cancels an `Evaluation` job if it is still in progress.
Args:
agent_id: Agent ID for which to cancel the evaluation
- job_id: Evaluation round ID to cancel
+ job_id: Evaluation job ID to cancel
extra_headers: Send extra headers
@@ -265,13 +272,20 @@ async def metadata(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> EvaluationJobMetadata:
- """
- Get an `Evaluation` round's status and results.
+ """Get an `Evaluation` job's status and results.
+
+ There are six possible statuses:
+ 'pending', 'processing', 'retrying', 'completed', 'failed', 'cancelled'.
+
+ If the evaluation job has completed, you will see your evaluation `metrics`,
+ `job_metadata`, and the `dataset_name` where your eval metrics and row-by-row
+ results are stored. You can use the `/datasets/evaluate` API to view the
+ specified `dataset`.
Args:
- agent_id: Agent ID for which to retrieve the evaluation
+ agent_id: ID of agent for which to retrieve evaluations
- job_id: Evaluation round ID to retrieve status and results for
+ job_id: Evaluation job ID to retrieve status and results for
extra_headers: Send extra headers
diff --git a/src/contextual/resources/agents/query.py b/src/contextual/resources/agents/query.py
index ac9de6f..1b6e1a4 100644
--- a/src/contextual/resources/agents/query.py
+++ b/src/contextual/resources/agents/query.py
@@ -62,7 +62,7 @@ def create(
messages: Iterable[query_create_params.Message],
retrievals_only: bool | NotGiven = NOT_GIVEN,
conversation_id: str | NotGiven = NOT_GIVEN,
- model_id: str | NotGiven = NOT_GIVEN,
+ llm_model_id: str | NotGiven = NOT_GIVEN,
stream: bool | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -78,15 +78,18 @@ def create(
Args:
agent_id: Agent ID of the agent to query
- messages: Message objects in the conversation
+ messages: Messages sent so far in the conversation, ending in the latest user message. Add
+ multiple objects to provide conversation history. Last message in the list must
+ be a `user`-sent message (i.e. `role` equals `"user"`).
retrievals_only: Set to `true` to skip generation of the response.
- conversation_id: Conversation ID. An optional alternative to providing message history in the
- `messages` field. If provided, history in the `messages` field will be ignored.
+ conversation_id: An optional alternative to providing message history in the `messages` field. If
+ provided, all messages in the `messages` list prior to the latest user-sent
+ query will be ignored.
- model_id: Model ID of the specific fine-tuned or aligned model to use. Defaults to base
- model if not specified.
+ llm_model_id: Model ID of the specific fine-tuned or aligned LLM model to use. Defaults to
+ base model if not specified.
stream: Set to `true` to receive a streamed response
@@ -106,7 +109,7 @@ def create(
{
"messages": messages,
"conversation_id": conversation_id,
- "model_id": model_id,
+ "llm_model_id": llm_model_id,
"stream": stream,
},
query_create_params.QueryCreateParams,
@@ -149,15 +152,15 @@ def feedback(
`content_id`.
Args:
- agent_id: Agent ID of the agent to provide feedback for
+ agent_id: ID of the agent for which to provide feedback
feedback: Feedback to provide on the message. Set to "removed" to undo previously provided
feedback.
- message_id: ID of the message to provide feedback on.
+ message_id: ID of the message on which to provide feedback.
- content_id: Content ID to provide feedback on, if feedback is on retrieval. Set to None for
- generation feedback.
+ content_id: ID of the content on which to provide feedback, if feedback is on retrieval. Do
+ not set (or set to null) while providing generation feedback.
explanation: Optional explanation for the feedback.
@@ -203,15 +206,17 @@ def metrics(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> QueryMetricsResponse:
- """
- Get feedbacks a given agent.
+ """Returns usage and user-provided feedback data.
+
+ This information can be used for
+ data-driven improvements and optimization.
Args:
agent_id: Agent ID of the agent to get metrics for
- created_after: Filters messages that are created before specified timestamp.
+ created_after: Filters messages that are created after the specified timestamp.
- created_before: Filters messages that are created after specified timestamp.
+ created_before: Filters messages that are created before specified timestamp.
limit: Limits the number of messages to return.
@@ -261,13 +266,13 @@ def retrieval_info(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RetrievalInfoResponse:
"""
- Return content metadata of the contents used to generate response for a given
+ Return metadata of the contents used to generate the response for a given
message.
Args:
- agent_id: Agent ID of the agent which sent the provided message.
+ agent_id: ID of the agent which sent the provided message.
- message_id: Message ID for which the content metadata needs to be retrieved.
+ message_id: ID of the message for which the content metadata needs to be retrieved.
content_ids: List of content ids for which to get the metadata.
@@ -325,7 +330,7 @@ async def create(
messages: Iterable[query_create_params.Message],
retrievals_only: bool | NotGiven = NOT_GIVEN,
conversation_id: str | NotGiven = NOT_GIVEN,
- model_id: str | NotGiven = NOT_GIVEN,
+ llm_model_id: str | NotGiven = NOT_GIVEN,
stream: bool | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -341,15 +346,18 @@ async def create(
Args:
agent_id: Agent ID of the agent to query
- messages: Message objects in the conversation
+ messages: Messages sent so far in the conversation, ending in the latest user message. Add
+ multiple objects to provide conversation history. Last message in the list must
+ be a `user`-sent message (i.e. `role` equals `"user"`).
retrievals_only: Set to `true` to skip generation of the response.
- conversation_id: Conversation ID. An optional alternative to providing message history in the
- `messages` field. If provided, history in the `messages` field will be ignored.
+ conversation_id: An optional alternative to providing message history in the `messages` field. If
+ provided, all messages in the `messages` list prior to the latest user-sent
+ query will be ignored.
- model_id: Model ID of the specific fine-tuned or aligned model to use. Defaults to base
- model if not specified.
+ llm_model_id: Model ID of the specific fine-tuned or aligned LLM model to use. Defaults to
+ base model if not specified.
stream: Set to `true` to receive a streamed response
@@ -369,7 +377,7 @@ async def create(
{
"messages": messages,
"conversation_id": conversation_id,
- "model_id": model_id,
+ "llm_model_id": llm_model_id,
"stream": stream,
},
query_create_params.QueryCreateParams,
@@ -414,15 +422,15 @@ async def feedback(
`content_id`.
Args:
- agent_id: Agent ID of the agent to provide feedback for
+ agent_id: ID of the agent for which to provide feedback
feedback: Feedback to provide on the message. Set to "removed" to undo previously provided
feedback.
- message_id: ID of the message to provide feedback on.
+ message_id: ID of the message on which to provide feedback.
- content_id: Content ID to provide feedback on, if feedback is on retrieval. Set to None for
- generation feedback.
+ content_id: ID of the content on which to provide feedback, if feedback is on retrieval. Do
+ not set (or set to null) while providing generation feedback.
explanation: Optional explanation for the feedback.
@@ -468,15 +476,17 @@ async def metrics(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> QueryMetricsResponse:
- """
- Get feedbacks a given agent.
+ """Returns usage and user-provided feedback data.
+
+ This information can be used for
+ data-driven improvements and optimization.
Args:
agent_id: Agent ID of the agent to get metrics for
- created_after: Filters messages that are created before specified timestamp.
+ created_after: Filters messages that are created after the specified timestamp.
- created_before: Filters messages that are created after specified timestamp.
+ created_before: Filters messages that are created before specified timestamp.
limit: Limits the number of messages to return.
@@ -526,13 +536,13 @@ async def retrieval_info(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RetrievalInfoResponse:
"""
- Return content metadata of the contents used to generate response for a given
+ Return metadata of the contents used to generate the response for a given
message.
Args:
- agent_id: Agent ID of the agent which sent the provided message.
+ agent_id: ID of the agent which sent the provided message.
- message_id: Message ID for which the content metadata needs to be retrieved.
+ message_id: ID of the message for which the content metadata needs to be retrieved.
content_ids: List of content ids for which to get the metadata.
diff --git a/src/contextual/resources/agents/tune/jobs.py b/src/contextual/resources/agents/tune/jobs.py
index 952f095..edf2b71 100644
--- a/src/contextual/resources/agents/tune/jobs.py
+++ b/src/contextual/resources/agents/tune/jobs.py
@@ -52,11 +52,11 @@ def list(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ListTuneJobsResponse:
"""
- Retrieve a list of all tune jobs run on a specified `Agent`, including their
+ Retrieve a list of all tune jobs run for a specified `Agent`, including their
`status`, `evaluation_results`, and resultant `model_id`.
Args:
- agent_id: Agent ID of the agent to list tuning jobs for
+ agent_id: ID of the agent to list tuning jobs for
extra_headers: Send extra headers
@@ -88,14 +88,13 @@ def delete(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> object:
- """Cancel a specific tuning job.
+ """Cancel a tuning job if it is still in progress.
- Terminates the tuning job if it is still in
- progress. If the tuning job has already completed, the tuned model will not be
- deleted.
+ If the tuning job has already
+ completed, the tuned model will not be deleted.
Args:
- agent_id: Agent ID of the agent associated with the tuning job
+ agent_id: ID of the agent associated with the tuning job
job_id: ID of the tuning job to cancel
@@ -131,13 +130,16 @@ def metadata(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TuneJobMetadata:
- """Retrieve the status of a specific tuning job.
+ """
+ Retrieve the status of a specific tuning job.
- Fetches the current status and
- evaluation results, if available, for the specified tuning job.
+ After the tuning job is complete, the metadata associated with the tune job will
+ include evaluation results and a model ID. You can then deploy the tuned model
+ to the agent by editing its config with the tuned model ID and the "Edit Agent"
+ API (i.e. the `PUT /agents/{agent_id}` API).
Args:
- agent_id: Agent ID of the agent associated with the tuning job
+ agent_id: ID of the agent associated with the tuning job
job_id: ID of the tuning job to retrieve the status for
@@ -194,11 +196,11 @@ async def list(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ListTuneJobsResponse:
"""
- Retrieve a list of all tune jobs run on a specified `Agent`, including their
+ Retrieve a list of all tune jobs run for a specified `Agent`, including their
`status`, `evaluation_results`, and resultant `model_id`.
Args:
- agent_id: Agent ID of the agent to list tuning jobs for
+ agent_id: ID of the agent to list tuning jobs for
extra_headers: Send extra headers
@@ -230,14 +232,13 @@ async def delete(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> object:
- """Cancel a specific tuning job.
+ """Cancel a tuning job if it is still in progress.
- Terminates the tuning job if it is still in
- progress. If the tuning job has already completed, the tuned model will not be
- deleted.
+ If the tuning job has already
+ completed, the tuned model will not be deleted.
Args:
- agent_id: Agent ID of the agent associated with the tuning job
+ agent_id: ID of the agent associated with the tuning job
job_id: ID of the tuning job to cancel
@@ -273,13 +274,16 @@ async def metadata(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TuneJobMetadata:
- """Retrieve the status of a specific tuning job.
+ """
+ Retrieve the status of a specific tuning job.
- Fetches the current status and
- evaluation results, if available, for the specified tuning job.
+ After the tuning job is complete, the metadata associated with the tune job will
+ include evaluation results and a model ID. You can then deploy the tuned model
+ to the agent by editing its config with the tuned model ID and the "Edit Agent"
+ API (i.e. the `PUT /agents/{agent_id}` API).
Args:
- agent_id: Agent ID of the agent associated with the tuning job
+ agent_id: ID of the agent associated with the tuning job
job_id: ID of the tuning job to retrieve the status for
diff --git a/src/contextual/resources/agents/tune/models.py b/src/contextual/resources/agents/tune/models.py
index ae3895b..c911ee5 100644
--- a/src/contextual/resources/agents/tune/models.py
+++ b/src/contextual/resources/agents/tune/models.py
@@ -54,7 +54,7 @@ def list(
Retrieves a list of tuned models associated with the specified agent.
Args:
- agent_id: Agent ID of the agent from which to retrieve tuned models
+ agent_id: ID of the agent from which to retrieve tuned models
extra_headers: Send extra headers
@@ -110,7 +110,7 @@ async def list(
Retrieves a list of tuned models associated with the specified agent.
Args:
- agent_id: Agent ID of the agent from which to retrieve tuned models
+ agent_id: ID of the agent from which to retrieve tuned models
extra_headers: Send extra headers
diff --git a/src/contextual/resources/agents/tune/tune.py b/src/contextual/resources/agents/tune/tune.py
index 10ef2c5..a71dd32 100644
--- a/src/contextual/resources/agents/tune/tune.py
+++ b/src/contextual/resources/agents/tune/tune.py
@@ -86,12 +86,11 @@ def create(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> CreateTuneResponse:
- """Create a tuning job for the specified `Agent`.
-
- Tuning jobs are asynchronous
- tasks to specialize your `Agent` to your specific domain or use case.
+ """
+ Create a tuning job for the specified `Agent` to specialize it to your specific
+ domain or use case.
- This API initiates a tuning specialization task using the provided
+ This API initiates an asynchronous tuning task using the provided
`training_file` and an optional `test_file`. If no `test_file` is provided, the
tuning job will hold out a portion of the `training_file` as the test set.
@@ -99,28 +98,27 @@ def create(
task through the `GET /tune/jobs/{job_id}/metadata` endpoint.
After the tuning job is complete, the metadata associated with the tune job will
- include evaluation results and a model ID. You can deploy the tuned model to the
- agent by editing its config with the "Edit Agent" API (i.e. the
- `PUT /agents/{agent_id}` API).
+ include evaluation results and a model ID. You can then deploy the tuned model
+ to the agent by editing its config with the tuned model ID and the "Edit Agent"
+ API (i.e. the `PUT /agents/{agent_id}` API).
Args:
- agent_id: Agent ID of the agent to tune
+ agent_id: ID of the agent to tune
training_file: Local path to the training data file.
The file should be in JSON array format, where each element of the array is a
JSON object represents a single training example. The four required fields are
- `guideline`, `prompt`, `response`, and `knowledge`.
+ `guideline`, `prompt`, `reference`, and `knowledge`.
- - `knowledge` field should be an array of strings, each string representing a
- piece of knowledge that the model should use to generate the response.
+ - `knowledge` (`list[str]`): Knowledge or retrievals used to generate the
+ reference response, as a list of string text chunks
- - `response` field should be the model's response to the prompt.
+ - `reference` field should be the model's response to the prompt.
- - `guideline` field should be a description of the expected response.
+ - `guideline` (`str`): Guidelines or criteria for model output
- - `prompt` field should be a question or statement that the model should respond
- to.
+ - `prompt` (required, `string`): Prompt or question model should respond to.
Example:
@@ -129,7 +127,7 @@ def create(
{
"guideline": "The response should be accurate.",
"prompt": "What was last quarter's revenue?",
- "response": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
+ "reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
"knowledge": [
"Quarterly report: Q3 revenue was $1.2 million.",
"Quarterly report: Q2 revenue was $1.1 million.",
@@ -143,7 +141,7 @@ def create(
model_id: ID of an existing model to tune. Defaults to the agent's default model if not
specified.
- test_file: Optional. Local path to the test data file. The file should follow the same
+ test_file: Optional. Local path to the test data file. The test file should follow the same
format as the training data file.
extra_headers: Send extra headers
@@ -221,12 +219,11 @@ async def create(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> CreateTuneResponse:
- """Create a tuning job for the specified `Agent`.
-
- Tuning jobs are asynchronous
- tasks to specialize your `Agent` to your specific domain or use case.
+ """
+ Create a tuning job for the specified `Agent` to specialize it to your specific
+ domain or use case.
- This API initiates a tuning specialization task using the provided
+ This API initiates an asynchronous tuning task using the provided
`training_file` and an optional `test_file`. If no `test_file` is provided, the
tuning job will hold out a portion of the `training_file` as the test set.
@@ -234,28 +231,27 @@ async def create(
task through the `GET /tune/jobs/{job_id}/metadata` endpoint.
After the tuning job is complete, the metadata associated with the tune job will
- include evaluation results and a model ID. You can deploy the tuned model to the
- agent by editing its config with the "Edit Agent" API (i.e. the
- `PUT /agents/{agent_id}` API).
+ include evaluation results and a model ID. You can then deploy the tuned model
+ to the agent by editing its config with the tuned model ID and the "Edit Agent"
+ API (i.e. the `PUT /agents/{agent_id}` API).
Args:
- agent_id: Agent ID of the agent to tune
+ agent_id: ID of the agent to tune
training_file: Local path to the training data file.
The file should be in JSON array format, where each element of the array is a
JSON object represents a single training example. The four required fields are
- `guideline`, `prompt`, `response`, and `knowledge`.
+ `guideline`, `prompt`, `reference`, and `knowledge`.
- - `knowledge` field should be an array of strings, each string representing a
- piece of knowledge that the model should use to generate the response.
+ - `knowledge` (`list[str]`): Knowledge or retrievals used to generate the
+ reference response, as a list of string text chunks
- - `response` field should be the model's response to the prompt.
+ - `reference` field should be the model's response to the prompt.
- - `guideline` field should be a description of the expected response.
+ - `guideline` (`str`): Guidelines or criteria for model output
- - `prompt` field should be a question or statement that the model should respond
- to.
+ - `prompt` (required, `string`): Prompt or question model should respond to.
Example:
@@ -264,7 +260,7 @@ async def create(
{
"guideline": "The response should be accurate.",
"prompt": "What was last quarter's revenue?",
- "response": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
+ "reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
"knowledge": [
"Quarterly report: Q3 revenue was $1.2 million.",
"Quarterly report: Q2 revenue was $1.1 million.",
@@ -278,7 +274,7 @@ async def create(
model_id: ID of an existing model to tune. Defaults to the agent's default model if not
specified.
- test_file: Optional. Local path to the test data file. The file should follow the same
+ test_file: Optional. Local path to the test data file. The test file should follow the same
format as the training data file.
extra_headers: Send extra headers
diff --git a/src/contextual/resources/datastores/datastores.py b/src/contextual/resources/datastores/datastores.py
index b9e8bdd..666101b 100644
--- a/src/contextual/resources/datastores/datastores.py
+++ b/src/contextual/resources/datastores/datastores.py
@@ -77,9 +77,11 @@ def create(
Documents can be ingested into and
deleted from a `Datastore`.
- A `Datastore` can be linked to one or more `Agents` to provide data on which the
- `Agent` can ground its answers. This linkage of `Datastore` to `Agent` is done
- through the `Create Agent` or `Edit Agent` APIs.
+ A `Datastore` can be linked to one or more `Agents`, and conversely, an `Agent`
+ can be associated with one or more `Datastores` to ground its responses with
+ relevant data. This flexible many-to-many relationship allows `Agents` to draw
+ from multiple sources of information. This linkage of `Datastore` to `Agent` is
+ done through the `Create Agent` or `Edit Agent` APIs.
Args:
name: Name of the datastore
@@ -115,7 +117,7 @@ def list(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncDatastoresPage[Datastore]:
"""
- List all the `Datastores`.
+ Retrieve a list of `Datastores`.
Performs `cursor`-based pagination if the number of `Datastores` exceeds the
requested `limit`. The returned `cursor` can be passed to the next
@@ -178,7 +180,7 @@ def delete(
associated with the `Datastore`.
Args:
- datastore_id: Datastore ID of the datastore to delete
+ datastore_id: ID of the datastore to delete
extra_headers: Send extra headers
@@ -214,7 +216,7 @@ def metadata(
list of `Agents` which are currently configured to use the `Datastore`.
Args:
- datastore_id: Datastore ID of the datastore to get details of
+ datastore_id: ID of the datastore for which to get details
extra_headers: Send extra headers
@@ -277,9 +279,11 @@ async def create(
Documents can be ingested into and
deleted from a `Datastore`.
- A `Datastore` can be linked to one or more `Agents` to provide data on which the
- `Agent` can ground its answers. This linkage of `Datastore` to `Agent` is done
- through the `Create Agent` or `Edit Agent` APIs.
+ A `Datastore` can be linked to one or more `Agents`, and conversely, an `Agent`
+ can be associated with one or more `Datastores` to ground its responses with
+ relevant data. This flexible many-to-many relationship allows `Agents` to draw
+ from multiple sources of information. This linkage of `Datastore` to `Agent` is
+ done through the `Create Agent` or `Edit Agent` APIs.
Args:
name: Name of the datastore
@@ -315,7 +319,7 @@ def list(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[Datastore, AsyncDatastoresPage[Datastore]]:
"""
- List all the `Datastores`.
+ Retrieve a list of `Datastores`.
Performs `cursor`-based pagination if the number of `Datastores` exceeds the
requested `limit`. The returned `cursor` can be passed to the next
@@ -378,7 +382,7 @@ async def delete(
associated with the `Datastore`.
Args:
- datastore_id: Datastore ID of the datastore to delete
+ datastore_id: ID of the datastore to delete
extra_headers: Send extra headers
@@ -414,7 +418,7 @@ async def metadata(
list of `Agents` which are currently configured to use the `Datastore`.
Args:
- datastore_id: Datastore ID of the datastore to get details of
+ datastore_id: ID of the datastore for which to get details
extra_headers: Send extra headers
diff --git a/src/contextual/types/agent_metadata.py b/src/contextual/types/agent_metadata.py
index a539020..5f25666 100644
--- a/src/contextual/types/agent_metadata.py
+++ b/src/contextual/types/agent_metadata.py
@@ -18,11 +18,11 @@ class AgentMetadata(BaseModel):
"""Description of the agent"""
llm_model_id: Optional[str] = None
- """Optional model ID of a tuned model to use for generation.
+ """The model ID to use for generation.
- Model must have been tuned on this agent; tuned models cannot be used across
- agents. Uses default model if none is specified. Set to `default` to deactivate
- the tuned model and use the default model.
+ Tuned models can only be used for the agents on which they were tuned. If no
+ model is specified, the default model is used. Set to `default` to switch from a
+ tuned model to the default model.
"""
suggested_queries: Optional[List[str]] = None
diff --git a/src/contextual/types/agent_update_params.py b/src/contextual/types/agent_update_params.py
index 0059096..a0afd70 100644
--- a/src/contextual/types/agent_update_params.py
+++ b/src/contextual/types/agent_update_params.py
@@ -13,11 +13,11 @@ class AgentUpdateParams(TypedDict, total=False):
"""IDs of the datastore to associate with the agent."""
llm_model_id: str
- """Optional model ID of a tuned model to use for generation.
+ """The model ID to use for generation.
- Model must have been tuned on this agent; tuned models cannot be used across
- agents. Uses default model if none is specified. Set to `default` to deactivate
- the tuned model and use the default model.
+ Tuned models can only be used for the agents on which they were tuned. If no
+ model is specified, the default model is used. Set to `default` to switch from a
+ tuned model to the default model.
"""
suggested_queries: List[str]
diff --git a/src/contextual/types/agents/datasets/__init__.py b/src/contextual/types/agents/datasets/__init__.py
index 56eabef..3cfcda4 100644
--- a/src/contextual/types/agents/datasets/__init__.py
+++ b/src/contextual/types/agents/datasets/__init__.py
@@ -2,12 +2,7 @@
from __future__ import annotations
-from .tune_list_params import TuneListParams as TuneListParams
-from .tune_create_params import TuneCreateParams as TuneCreateParams
-from .tune_update_params import TuneUpdateParams as TuneUpdateParams
from .evaluate_list_params import EvaluateListParams as EvaluateListParams
-from .tune_metadata_params import TuneMetadataParams as TuneMetadataParams
-from .tune_retrieve_params import TuneRetrieveParams as TuneRetrieveParams
from .evaluate_create_params import EvaluateCreateParams as EvaluateCreateParams
from .evaluate_update_params import EvaluateUpdateParams as EvaluateUpdateParams
from .evaluate_metadata_params import EvaluateMetadataParams as EvaluateMetadataParams
diff --git a/src/contextual/types/agents/datasets/tune_create_params.py b/src/contextual/types/agents/datasets/tune_create_params.py
deleted file mode 100644
index 3315a8d..0000000
--- a/src/contextual/types/agents/datasets/tune_create_params.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-from ...._types import FileTypes
-
-__all__ = ["TuneCreateParams"]
-
-
-class TuneCreateParams(TypedDict, total=False):
- dataset_name: Required[str]
- """Name of the tune dataset"""
-
- dataset_type: Required[Literal["tuning_set"]]
- """Type of tune dataset which determines its schema and validation rules."""
-
- file: Required[FileTypes]
- """JSONL file containing the tune dataset"""
diff --git a/src/contextual/types/agents/datasets/tune_list_params.py b/src/contextual/types/agents/datasets/tune_list_params.py
deleted file mode 100644
index 59702ad..0000000
--- a/src/contextual/types/agents/datasets/tune_list_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["TuneListParams"]
-
-
-class TuneListParams(TypedDict, total=False):
- dataset_name: str
- """Optional dataset name to filter the results by.
-
- If provided, only versions from that dataset are listed.
- """
diff --git a/src/contextual/types/agents/datasets/tune_metadata_params.py b/src/contextual/types/agents/datasets/tune_metadata_params.py
deleted file mode 100644
index a935263..0000000
--- a/src/contextual/types/agents/datasets/tune_metadata_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["TuneMetadataParams"]
-
-
-class TuneMetadataParams(TypedDict, total=False):
- agent_id: Required[str]
- """Agent ID associated with the tune dataset"""
-
- version: str
- """Version number of the dataset. Defaults to the latest version if not specified."""
diff --git a/src/contextual/types/agents/datasets/tune_retrieve_params.py b/src/contextual/types/agents/datasets/tune_retrieve_params.py
deleted file mode 100644
index 2192e7e..0000000
--- a/src/contextual/types/agents/datasets/tune_retrieve_params.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["TuneRetrieveParams"]
-
-
-class TuneRetrieveParams(TypedDict, total=False):
- agent_id: Required[str]
- """Agent ID associated with the tune dataset"""
-
- batch_size: int
- """Batch size for processing"""
-
- version: str
- """Version number of the tune dataset to retrieve.
-
- Defaults to the latest version if not specified.
- """
diff --git a/src/contextual/types/agents/datasets/tune_update_params.py b/src/contextual/types/agents/datasets/tune_update_params.py
deleted file mode 100644
index f3a4d59..0000000
--- a/src/contextual/types/agents/datasets/tune_update_params.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-from ...._types import FileTypes
-
-__all__ = ["TuneUpdateParams"]
-
-
-class TuneUpdateParams(TypedDict, total=False):
- agent_id: Required[str]
- """Agent ID associated with the tune dataset"""
-
- dataset_type: Required[Literal["tuning_set"]]
- """Type of tune dataset which determines its schema and validation rules.
-
- Must match the `dataset_type` used at dataset creation time.
- """
-
- file: Required[FileTypes]
- """JSONL file containing the entries to append to the tune dataset"""
diff --git a/src/contextual/types/agents/evaluate_create_params.py b/src/contextual/types/agents/evaluate_create_params.py
index 6c50813..def12aa 100644
--- a/src/contextual/types/agents/evaluate_create_params.py
+++ b/src/contextual/types/agents/evaluate_create_params.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import List
+from typing import List, Optional
from typing_extensions import Literal, Required, TypedDict
from ..._types import FileTypes
@@ -12,28 +12,21 @@
class EvaluateCreateParams(TypedDict, total=False):
metrics: Required[List[Literal["equivalence", "groundedness"]]]
- """List of metrics to use.
-
- Supported metrics are `equivalence` and `groundedness`. Use comma-separated list
- to pass multiple values or use repeated keys.
- """
+ """List of metrics to use. Supported metrics are `equivalence` and `groundedness`."""
evalset_file: FileTypes
- """
- Evalset file (CSV) to use for evaluation, containing the columns `prompt`
- (`question`), `reference` (`ground truth response`), and optional additional
- columns based on the selected metrics. Either `dataset_name` or `evalset_file`
- must be provided, but not both.
+ """Evalset file (CSV) to use for evaluation, containing the columns `prompt` (i.e.
+
+ question) and `reference` (i.e. ground truth response). Either `evalset_name` or
+ `evalset_file` must be provided, but not both.
"""
evalset_name: str
- """Name of the dataset to use for evaluation, created through the dataset API.
-
- Either `dataset_name` or `evalset_file` must be provided, but not both.
"""
-
- model_name: str
- """Model name of the tuned or aligned model to use.
-
- Defaults to the default model if not specified.
+ Name of the Dataset to use for evaluation, created through the
+ `/datasets/evaluate` API. Either `evalset_name` or `evalset_file` must be
+ provided, but not both.
"""
+
+ llm_model_id: Optional[str]
+ """ID of the model to evaluate. Uses the default model if not specified."""
diff --git a/src/contextual/types/agents/query_create_params.py b/src/contextual/types/agents/query_create_params.py
index b5db07c..7c7e7eb 100644
--- a/src/contextual/types/agents/query_create_params.py
+++ b/src/contextual/types/agents/query_create_params.py
@@ -10,20 +10,24 @@
class QueryCreateParams(TypedDict, total=False):
messages: Required[Iterable[Message]]
- """Message objects in the conversation"""
+ """Messages sent so far in the conversation, ending in the latest user message.
+
+ Add multiple objects to provide conversation history. Last message in the list
+ must be a `user`-sent message (i.e. `role` equals `"user"`).
+ """
retrievals_only: bool
"""Set to `true` to skip generation of the response."""
conversation_id: str
- """Conversation ID.
+ """An optional alternative to providing message history in the `messages` field.
- An optional alternative to providing message history in the `messages` field. If
- provided, history in the `messages` field will be ignored.
+ If provided, all messages in the `messages` list prior to the latest user-sent
+ query will be ignored.
"""
- model_id: str
- """Model ID of the specific fine-tuned or aligned model to use.
+ llm_model_id: str
+ """Model ID of the specific fine-tuned or aligned LLM model to use.
Defaults to base model if not specified.
"""
diff --git a/src/contextual/types/agents/query_feedback_params.py b/src/contextual/types/agents/query_feedback_params.py
index 8b2d306..44f6b29 100644
--- a/src/contextual/types/agents/query_feedback_params.py
+++ b/src/contextual/types/agents/query_feedback_params.py
@@ -15,12 +15,12 @@ class QueryFeedbackParams(TypedDict, total=False):
"""
message_id: Required[str]
- """ID of the message to provide feedback on."""
+ """ID of the message on which to provide feedback."""
content_id: str
- """Content ID to provide feedback on, if feedback is on retrieval.
+ """ID of the content on which to provide feedback, if feedback is on retrieval.
- Set to None for generation feedback.
+ Do not set (or set to null) while providing generation feedback.
"""
explanation: str
diff --git a/src/contextual/types/agents/query_metrics_params.py b/src/contextual/types/agents/query_metrics_params.py
index e445199..b3483cb 100644
--- a/src/contextual/types/agents/query_metrics_params.py
+++ b/src/contextual/types/agents/query_metrics_params.py
@@ -13,10 +13,10 @@
class QueryMetricsParams(TypedDict, total=False):
created_after: Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]
- """Filters messages that are created before specified timestamp."""
+ """Filters messages that are created after the specified timestamp."""
created_before: Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]
- """Filters messages that are created after specified timestamp."""
+ """Filters messages that are created before the specified timestamp."""
limit: int
"""Limits the number of messages to return."""
diff --git a/src/contextual/types/agents/query_retrieval_info_params.py b/src/contextual/types/agents/query_retrieval_info_params.py
index 54b4db6..14d5fef 100644
--- a/src/contextual/types/agents/query_retrieval_info_params.py
+++ b/src/contextual/types/agents/query_retrieval_info_params.py
@@ -10,7 +10,7 @@
class QueryRetrievalInfoParams(TypedDict, total=False):
agent_id: Required[str]
- """Agent ID of the agent which sent the provided message."""
+ """ID of the agent which sent the provided message."""
content_ids: Required[List[str]]
"""List of content ids for which to get the metadata."""
diff --git a/src/contextual/types/agents/tune/list_tune_jobs_response.py b/src/contextual/types/agents/tune/list_tune_jobs_response.py
index 9c6146c..d1fa94a 100644
--- a/src/contextual/types/agents/tune/list_tune_jobs_response.py
+++ b/src/contextual/types/agents/tune/list_tune_jobs_response.py
@@ -13,7 +13,11 @@ class Job(BaseModel):
"""ID of the tune job"""
job_status: str
- """Status of the tune job"""
+ """Status of the tune job.
+
+ There are four possible statuses: 'failed', 'pending', 'processing' and
+ 'completed'.
+ """
evaluation_results: Optional[Dict[str, float]] = None
"""
@@ -23,7 +27,7 @@ class Job(BaseModel):
"""
model_id: Optional[str] = None
- """ID of the trained model.
+ """ID of the tuned model.
Omitted if the tuning job failed or is still in progress.
"""
diff --git a/src/contextual/types/agents/tune/list_tune_models_response.py b/src/contextual/types/agents/tune/list_tune_models_response.py
index 9130417..963655b 100644
--- a/src/contextual/types/agents/tune/list_tune_models_response.py
+++ b/src/contextual/types/agents/tune/list_tune_models_response.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
+from typing import List
from datetime import datetime
from ...._compat import PYDANTIC_V2, ConfigDict
@@ -25,14 +25,8 @@ class Model(BaseModel):
class ListTuneModelsResponse(BaseModel):
- has_more: bool
- """Whether there are more models to retrieve"""
-
models: List[Model]
"""List of registered models for the application"""
total: int
"""Total number of models associated with the application"""
-
- next_after: Optional[str] = None
- """Identifier of the last model from the current request, used for pagination"""
diff --git a/src/contextual/types/agents/tune/tune_job_metadata.py b/src/contextual/types/agents/tune/tune_job_metadata.py
index b72f2f8..a64be7c 100644
--- a/src/contextual/types/agents/tune/tune_job_metadata.py
+++ b/src/contextual/types/agents/tune/tune_job_metadata.py
@@ -10,13 +10,17 @@
class TuneJobMetadata(BaseModel):
job_status: str
- """Status of the tune job"""
+ """Status of the tune job.
+
+ There are four possible statuses: 'failed', 'pending', 'processing' and
+ 'completed'.
+ """
evaluation_results: Optional[Dict[str, float]] = None
"""
- Evaluation results of the tuned model, represented as an object mapping metric
- names (strings) to their scores (floats). Omitted if the tuning job failed or is
- still in progress.
+ Evaluation results of the tuned model, represented as a dictionary mapping
+ metric names (strings) to their scores (floats). Omitted if the tuning job
+ failed or is still in progress.
"""
model_id: Optional[str] = None
diff --git a/src/contextual/types/agents/tune_create_params.py b/src/contextual/types/agents/tune_create_params.py
index 7220c49..3124f9b 100644
--- a/src/contextual/types/agents/tune_create_params.py
+++ b/src/contextual/types/agents/tune_create_params.py
@@ -15,17 +15,16 @@ class TuneCreateParams(TypedDict, total=False):
The file should be in JSON array format, where each element of the array is a
JSON object represents a single training example. The four required fields are
- `guideline`, `prompt`, `response`, and `knowledge`.
+ `guideline`, `prompt`, `reference`, and `knowledge`.
- - `knowledge` field should be an array of strings, each string representing a
- piece of knowledge that the model should use to generate the response.
+ - `knowledge` (`list[str]`): Knowledge or retrievals used to generate the
+ reference response, as a list of string text chunks
- - `response` field should be the model's response to the prompt.
+ - `reference` field should be the model's response to the prompt.
- - `guideline` field should be a description of the expected response.
+ - `guideline` (`str`): Guidelines or criteria for model output
- - `prompt` field should be a question or statement that the model should respond
- to.
+ - `prompt` (required, `str`): Prompt or question model should respond to.
Example:
@@ -34,7 +33,7 @@ class TuneCreateParams(TypedDict, total=False):
{
"guideline": "The response should be accurate.",
"prompt": "What was last quarter's revenue?",
- "response": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
+ "reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
"knowledge": [
"Quarterly report: Q3 revenue was $1.2 million.",
"Quarterly report: Q2 revenue was $1.1 million.",
@@ -55,6 +54,6 @@ class TuneCreateParams(TypedDict, total=False):
test_file: FileTypes
"""Optional.
- Local path to the test data file. The file should follow the same format as the
- training data file.
+ Local path to the test data file. The test file should follow the same format as
+ the training data file.
"""
diff --git a/src/contextual/types/list_agents_response.py b/src/contextual/types/list_agents_response.py
index c7551f8..07a5340 100644
--- a/src/contextual/types/list_agents_response.py
+++ b/src/contextual/types/list_agents_response.py
@@ -12,7 +12,7 @@ class ListAgentsResponse(BaseModel):
total_count: int
"""Total number of available agents"""
- data: Optional[List[Agent]] = None
+ agents: Optional[List[Agent]] = None
"""List of active agents"""
next_cursor: Optional[str] = None
diff --git a/tests/api_resources/agents/datasets/test_tune.py b/tests/api_resources/agents/datasets/test_tune.py
deleted file mode 100644
index 704dac4..0000000
--- a/tests/api_resources/agents/datasets/test_tune.py
+++ /dev/null
@@ -1,716 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import httpx
-import pytest
-from respx import MockRouter
-
-from contextual import ContextualAI, AsyncContextualAI
-from tests.utils import assert_matches_type
-from contextual._response import (
- BinaryAPIResponse,
- AsyncBinaryAPIResponse,
- StreamedBinaryAPIResponse,
- AsyncStreamedBinaryAPIResponse,
-)
-from contextual.types.agents import DatasetMetadata, ListDatasetsResponse, CreateDatasetResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestTune:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_create(self, client: ContextualAI) -> None:
- tune = client.agents.datasets.tune.create(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_name="dataset_name",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- @parametrize
- def test_raw_response_create(self, client: ContextualAI) -> None:
- response = client.agents.datasets.tune.with_raw_response.create(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_name="dataset_name",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tune = response.parse()
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- @parametrize
- def test_streaming_response_create(self, client: ContextualAI) -> None:
- with client.agents.datasets.tune.with_streaming_response.create(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_name="dataset_name",
- dataset_type="tuning_set",
- file=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tune = response.parse()
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_create(self, client: ContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- client.agents.datasets.tune.with_raw_response.create(
- agent_id="",
- dataset_name="dataset_name",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
-
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_method_retrieve(self, client: ContextualAI, respx_mock: MockRouter) -> None:
- respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
- return_value=httpx.Response(200, json={"foo": "bar"})
- )
- tune = client.agents.datasets.tune.retrieve(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
- assert tune.is_closed
- assert tune.json() == {"foo": "bar"}
- assert cast(Any, tune.is_closed) is True
- assert isinstance(tune, BinaryAPIResponse)
-
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_method_retrieve_with_all_params(self, client: ContextualAI, respx_mock: MockRouter) -> None:
- respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
- return_value=httpx.Response(200, json={"foo": "bar"})
- )
- tune = client.agents.datasets.tune.retrieve(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- batch_size=1,
- version="version",
- )
- assert tune.is_closed
- assert tune.json() == {"foo": "bar"}
- assert cast(Any, tune.is_closed) is True
- assert isinstance(tune, BinaryAPIResponse)
-
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_raw_response_retrieve(self, client: ContextualAI, respx_mock: MockRouter) -> None:
- respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
- return_value=httpx.Response(200, json={"foo": "bar"})
- )
-
- tune = client.agents.datasets.tune.with_raw_response.retrieve(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- assert tune.is_closed is True
- assert tune.http_request.headers.get("X-Stainless-Lang") == "python"
- assert tune.json() == {"foo": "bar"}
- assert isinstance(tune, BinaryAPIResponse)
-
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_streaming_response_retrieve(self, client: ContextualAI, respx_mock: MockRouter) -> None:
- respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
- return_value=httpx.Response(200, json={"foo": "bar"})
- )
- with client.agents.datasets.tune.with_streaming_response.retrieve(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- ) as tune:
- assert not tune.is_closed
- assert tune.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assert tune.json() == {"foo": "bar"}
- assert cast(Any, tune.is_closed) is True
- assert isinstance(tune, StreamedBinaryAPIResponse)
-
- assert cast(Any, tune.is_closed) is True
-
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_path_params_retrieve(self, client: ContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- client.agents.datasets.tune.with_raw_response.retrieve(
- dataset_name="dataset_name",
- agent_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
- client.agents.datasets.tune.with_raw_response.retrieve(
- dataset_name="",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- @parametrize
- def test_method_update(self, client: ContextualAI) -> None:
- tune = client.agents.datasets.tune.update(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- @parametrize
- def test_raw_response_update(self, client: ContextualAI) -> None:
- response = client.agents.datasets.tune.with_raw_response.update(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tune = response.parse()
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- @parametrize
- def test_streaming_response_update(self, client: ContextualAI) -> None:
- with client.agents.datasets.tune.with_streaming_response.update(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_type="tuning_set",
- file=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tune = response.parse()
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_update(self, client: ContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- client.agents.datasets.tune.with_raw_response.update(
- dataset_name="dataset_name",
- agent_id="",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
- client.agents.datasets.tune.with_raw_response.update(
- dataset_name="",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
-
- @parametrize
- def test_method_list(self, client: ContextualAI) -> None:
- tune = client.agents.datasets.tune.list(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
- assert_matches_type(ListDatasetsResponse, tune, path=["response"])
-
- @parametrize
- def test_method_list_with_all_params(self, client: ContextualAI) -> None:
- tune = client.agents.datasets.tune.list(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_name="dataset_name",
- )
- assert_matches_type(ListDatasetsResponse, tune, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: ContextualAI) -> None:
- response = client.agents.datasets.tune.with_raw_response.list(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tune = response.parse()
- assert_matches_type(ListDatasetsResponse, tune, path=["response"])
-
- @parametrize
- def test_streaming_response_list(self, client: ContextualAI) -> None:
- with client.agents.datasets.tune.with_streaming_response.list(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tune = response.parse()
- assert_matches_type(ListDatasetsResponse, tune, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_list(self, client: ContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- client.agents.datasets.tune.with_raw_response.list(
- agent_id="",
- )
-
- @parametrize
- def test_method_delete(self, client: ContextualAI) -> None:
- tune = client.agents.datasets.tune.delete(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
- assert_matches_type(object, tune, path=["response"])
-
- @parametrize
- def test_raw_response_delete(self, client: ContextualAI) -> None:
- response = client.agents.datasets.tune.with_raw_response.delete(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tune = response.parse()
- assert_matches_type(object, tune, path=["response"])
-
- @parametrize
- def test_streaming_response_delete(self, client: ContextualAI) -> None:
- with client.agents.datasets.tune.with_streaming_response.delete(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tune = response.parse()
- assert_matches_type(object, tune, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_delete(self, client: ContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- client.agents.datasets.tune.with_raw_response.delete(
- dataset_name="dataset_name",
- agent_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
- client.agents.datasets.tune.with_raw_response.delete(
- dataset_name="",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- @parametrize
- def test_method_metadata(self, client: ContextualAI) -> None:
- tune = client.agents.datasets.tune.metadata(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
- assert_matches_type(DatasetMetadata, tune, path=["response"])
-
- @parametrize
- def test_method_metadata_with_all_params(self, client: ContextualAI) -> None:
- tune = client.agents.datasets.tune.metadata(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- version="version",
- )
- assert_matches_type(DatasetMetadata, tune, path=["response"])
-
- @parametrize
- def test_raw_response_metadata(self, client: ContextualAI) -> None:
- response = client.agents.datasets.tune.with_raw_response.metadata(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tune = response.parse()
- assert_matches_type(DatasetMetadata, tune, path=["response"])
-
- @parametrize
- def test_streaming_response_metadata(self, client: ContextualAI) -> None:
- with client.agents.datasets.tune.with_streaming_response.metadata(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tune = response.parse()
- assert_matches_type(DatasetMetadata, tune, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_metadata(self, client: ContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- client.agents.datasets.tune.with_raw_response.metadata(
- dataset_name="dataset_name",
- agent_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
- client.agents.datasets.tune.with_raw_response.metadata(
- dataset_name="",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
-
-class TestAsyncTune:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- async def test_method_create(self, async_client: AsyncContextualAI) -> None:
- tune = await async_client.agents.datasets.tune.create(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_name="dataset_name",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncContextualAI) -> None:
- response = await async_client.agents.datasets.tune.with_raw_response.create(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_name="dataset_name",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tune = await response.parse()
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncContextualAI) -> None:
- async with async_client.agents.datasets.tune.with_streaming_response.create(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_name="dataset_name",
- dataset_type="tuning_set",
- file=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tune = await response.parse()
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_create(self, async_client: AsyncContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- await async_client.agents.datasets.tune.with_raw_response.create(
- agent_id="",
- dataset_name="dataset_name",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
-
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_method_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None:
- respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
- return_value=httpx.Response(200, json={"foo": "bar"})
- )
- tune = await async_client.agents.datasets.tune.retrieve(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
- assert tune.is_closed
- assert await tune.json() == {"foo": "bar"}
- assert cast(Any, tune.is_closed) is True
- assert isinstance(tune, AsyncBinaryAPIResponse)
-
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_method_retrieve_with_all_params(
- self, async_client: AsyncContextualAI, respx_mock: MockRouter
- ) -> None:
- respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
- return_value=httpx.Response(200, json={"foo": "bar"})
- )
- tune = await async_client.agents.datasets.tune.retrieve(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- batch_size=1,
- version="version",
- )
- assert tune.is_closed
- assert await tune.json() == {"foo": "bar"}
- assert cast(Any, tune.is_closed) is True
- assert isinstance(tune, AsyncBinaryAPIResponse)
-
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_raw_response_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None:
- respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
- return_value=httpx.Response(200, json={"foo": "bar"})
- )
-
- tune = await async_client.agents.datasets.tune.with_raw_response.retrieve(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- assert tune.is_closed is True
- assert tune.http_request.headers.get("X-Stainless-Lang") == "python"
- assert await tune.json() == {"foo": "bar"}
- assert isinstance(tune, AsyncBinaryAPIResponse)
-
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_streaming_response_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None:
- respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
- return_value=httpx.Response(200, json={"foo": "bar"})
- )
- async with async_client.agents.datasets.tune.with_streaming_response.retrieve(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- ) as tune:
- assert not tune.is_closed
- assert tune.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assert await tune.json() == {"foo": "bar"}
- assert cast(Any, tune.is_closed) is True
- assert isinstance(tune, AsyncStreamedBinaryAPIResponse)
-
- assert cast(Any, tune.is_closed) is True
-
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_path_params_retrieve(self, async_client: AsyncContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- await async_client.agents.datasets.tune.with_raw_response.retrieve(
- dataset_name="dataset_name",
- agent_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
- await async_client.agents.datasets.tune.with_raw_response.retrieve(
- dataset_name="",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- @parametrize
- async def test_method_update(self, async_client: AsyncContextualAI) -> None:
- tune = await async_client.agents.datasets.tune.update(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncContextualAI) -> None:
- response = await async_client.agents.datasets.tune.with_raw_response.update(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tune = await response.parse()
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncContextualAI) -> None:
- async with async_client.agents.datasets.tune.with_streaming_response.update(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_type="tuning_set",
- file=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tune = await response.parse()
- assert_matches_type(CreateDatasetResponse, tune, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_update(self, async_client: AsyncContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- await async_client.agents.datasets.tune.with_raw_response.update(
- dataset_name="dataset_name",
- agent_id="",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
- await async_client.agents.datasets.tune.with_raw_response.update(
- dataset_name="",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_type="tuning_set",
- file=b"raw file contents",
- )
-
- @parametrize
- async def test_method_list(self, async_client: AsyncContextualAI) -> None:
- tune = await async_client.agents.datasets.tune.list(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
- assert_matches_type(ListDatasetsResponse, tune, path=["response"])
-
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncContextualAI) -> None:
- tune = await async_client.agents.datasets.tune.list(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- dataset_name="dataset_name",
- )
- assert_matches_type(ListDatasetsResponse, tune, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None:
- response = await async_client.agents.datasets.tune.with_raw_response.list(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tune = await response.parse()
- assert_matches_type(ListDatasetsResponse, tune, path=["response"])
-
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncContextualAI) -> None:
- async with async_client.agents.datasets.tune.with_streaming_response.list(
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tune = await response.parse()
- assert_matches_type(ListDatasetsResponse, tune, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_list(self, async_client: AsyncContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- await async_client.agents.datasets.tune.with_raw_response.list(
- agent_id="",
- )
-
- @parametrize
- async def test_method_delete(self, async_client: AsyncContextualAI) -> None:
- tune = await async_client.agents.datasets.tune.delete(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
- assert_matches_type(object, tune, path=["response"])
-
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncContextualAI) -> None:
- response = await async_client.agents.datasets.tune.with_raw_response.delete(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tune = await response.parse()
- assert_matches_type(object, tune, path=["response"])
-
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncContextualAI) -> None:
- async with async_client.agents.datasets.tune.with_streaming_response.delete(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tune = await response.parse()
- assert_matches_type(object, tune, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- await async_client.agents.datasets.tune.with_raw_response.delete(
- dataset_name="dataset_name",
- agent_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
- await async_client.agents.datasets.tune.with_raw_response.delete(
- dataset_name="",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- @parametrize
- async def test_method_metadata(self, async_client: AsyncContextualAI) -> None:
- tune = await async_client.agents.datasets.tune.metadata(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
- assert_matches_type(DatasetMetadata, tune, path=["response"])
-
- @parametrize
- async def test_method_metadata_with_all_params(self, async_client: AsyncContextualAI) -> None:
- tune = await async_client.agents.datasets.tune.metadata(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- version="version",
- )
- assert_matches_type(DatasetMetadata, tune, path=["response"])
-
- @parametrize
- async def test_raw_response_metadata(self, async_client: AsyncContextualAI) -> None:
- response = await async_client.agents.datasets.tune.with_raw_response.metadata(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tune = await response.parse()
- assert_matches_type(DatasetMetadata, tune, path=["response"])
-
- @parametrize
- async def test_streaming_response_metadata(self, async_client: AsyncContextualAI) -> None:
- async with async_client.agents.datasets.tune.with_streaming_response.metadata(
- dataset_name="dataset_name",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tune = await response.parse()
- assert_matches_type(DatasetMetadata, tune, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_metadata(self, async_client: AsyncContextualAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
- await async_client.agents.datasets.tune.with_raw_response.metadata(
- dataset_name="dataset_name",
- agent_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
- await async_client.agents.datasets.tune.with_raw_response.metadata(
- dataset_name="",
- agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- )
diff --git a/tests/api_resources/agents/test_evaluate.py b/tests/api_resources/agents/test_evaluate.py
index 35937db..512abf8 100644
--- a/tests/api_resources/agents/test_evaluate.py
+++ b/tests/api_resources/agents/test_evaluate.py
@@ -32,7 +32,7 @@ def test_method_create_with_all_params(self, client: ContextualAI) -> None:
metrics=["equivalence"],
evalset_file=b"raw file contents",
evalset_name="evalset_name",
- model_name="model_name",
+ llm_model_id="llm_model_id",
)
assert_matches_type(CreateEvaluationResponse, evaluate, path=["response"])
@@ -89,7 +89,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncContextual
metrics=["equivalence"],
evalset_file=b"raw file contents",
evalset_name="evalset_name",
- model_name="model_name",
+ llm_model_id="llm_model_id",
)
assert_matches_type(CreateEvaluationResponse, evaluate, path=["response"])
diff --git a/tests/api_resources/agents/test_query.py b/tests/api_resources/agents/test_query.py
index 140ad5b..2f690c0 100644
--- a/tests/api_resources/agents/test_query.py
+++ b/tests/api_resources/agents/test_query.py
@@ -47,7 +47,7 @@ def test_method_create_with_all_params(self, client: ContextualAI) -> None:
],
retrievals_only=True,
conversation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- model_id="model_id",
+ llm_model_id="llm_model_id",
stream=True,
)
assert_matches_type(QueryResponse, query, path=["response"])
@@ -289,7 +289,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncContextual
],
retrievals_only=True,
conversation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- model_id="model_id",
+ llm_model_id="llm_model_id",
stream=True,
)
assert_matches_type(QueryResponse, query, path=["response"])
diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py
index 30eb7db..adb73a6 100644
--- a/tests/api_resources/test_agents.py
+++ b/tests/api_resources/test_agents.py
@@ -14,7 +14,7 @@
AgentMetadata,
CreateAgentOutput,
)
-from contextual.pagination import SyncPage, AsyncPage
+from contextual.pagination import SyncAgentsPage, AsyncAgentsPage
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -116,7 +116,7 @@ def test_path_params_update(self, client: ContextualAI) -> None:
@parametrize
def test_method_list(self, client: ContextualAI) -> None:
agent = client.agents.list()
- assert_matches_type(SyncPage[Agent], agent, path=["response"])
+ assert_matches_type(SyncAgentsPage[Agent], agent, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: ContextualAI) -> None:
@@ -124,7 +124,7 @@ def test_method_list_with_all_params(self, client: ContextualAI) -> None:
cursor="cursor",
limit=1,
)
- assert_matches_type(SyncPage[Agent], agent, path=["response"])
+ assert_matches_type(SyncAgentsPage[Agent], agent, path=["response"])
@parametrize
def test_raw_response_list(self, client: ContextualAI) -> None:
@@ -133,7 +133,7 @@ def test_raw_response_list(self, client: ContextualAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = response.parse()
- assert_matches_type(SyncPage[Agent], agent, path=["response"])
+ assert_matches_type(SyncAgentsPage[Agent], agent, path=["response"])
@parametrize
def test_streaming_response_list(self, client: ContextualAI) -> None:
@@ -142,7 +142,7 @@ def test_streaming_response_list(self, client: ContextualAI) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = response.parse()
- assert_matches_type(SyncPage[Agent], agent, path=["response"])
+ assert_matches_type(SyncAgentsPage[Agent], agent, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -320,7 +320,7 @@ async def test_path_params_update(self, async_client: AsyncContextualAI) -> None
@parametrize
async def test_method_list(self, async_client: AsyncContextualAI) -> None:
agent = await async_client.agents.list()
- assert_matches_type(AsyncPage[Agent], agent, path=["response"])
+ assert_matches_type(AsyncAgentsPage[Agent], agent, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncContextualAI) -> None:
@@ -328,7 +328,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncContextualAI
cursor="cursor",
limit=1,
)
- assert_matches_type(AsyncPage[Agent], agent, path=["response"])
+ assert_matches_type(AsyncAgentsPage[Agent], agent, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None:
@@ -337,7 +337,7 @@ async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = await response.parse()
- assert_matches_type(AsyncPage[Agent], agent, path=["response"])
+ assert_matches_type(AsyncAgentsPage[Agent], agent, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncContextualAI) -> None:
@@ -346,7 +346,7 @@ async def test_streaming_response_list(self, async_client: AsyncContextualAI) ->
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = await response.parse()
- assert_matches_type(AsyncPage[Agent], agent, path=["response"])
+ assert_matches_type(AsyncAgentsPage[Agent], agent, path=["response"])
assert cast(Any, response.is_closed) is True
From 990c359ab3f2e6c3f29fc07e158c895159e8cc94 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 15 Jan 2025 08:10:14 +0000
Subject: [PATCH 2/4] feat(api): update via SDK Studio (#18)
---
src/contextual/pagination.py | 42 ------------------------------------
1 file changed, 42 deletions(-)
diff --git a/src/contextual/pagination.py b/src/contextual/pagination.py
index 13e3dcb..dbe57c8 100644
--- a/src/contextual/pagination.py
+++ b/src/contextual/pagination.py
@@ -12,8 +12,6 @@
"AsyncDocumentsPage",
"SyncAgentsPage",
"AsyncAgentsPage",
- "SyncPage",
- "AsyncPage",
]
_T = TypeVar("_T")
@@ -137,43 +135,3 @@ def next_page_info(self) -> Optional[PageInfo]:
return None
return PageInfo(params={"cursor": next_cursor})
-
-
-class SyncPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
- data: List[_T]
- next_cursor: Optional[str] = None
-
- @override
- def _get_page_items(self) -> List[_T]:
- data = self.data
- if not data:
- return []
- return data
-
- @override
- def next_page_info(self) -> Optional[PageInfo]:
- next_cursor = self.next_cursor
- if not next_cursor:
- return None
-
- return PageInfo(params={"cursor": next_cursor})
-
-
-class AsyncPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
- data: List[_T]
- next_cursor: Optional[str] = None
-
- @override
- def _get_page_items(self) -> List[_T]:
- data = self.data
- if not data:
- return []
- return data
-
- @override
- def next_page_info(self) -> Optional[PageInfo]:
- next_cursor = self.next_cursor
- if not next_cursor:
- return None
-
- return PageInfo(params={"cursor": next_cursor})
From 4eeaea95542c416d4dfa0d00e0304c3f88c2be79 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 15 Jan 2025 08:33:27 +0000
Subject: [PATCH 3/4] feat(api): update via SDK Studio (#19)
---
api.md | 2 +-
src/contextual/pagination.py | 8 ++++----
src/contextual/resources/agents/agents.py | 10 +++++-----
tests/api_resources/test_agents.py | 18 +++++++++---------
4 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/api.md b/api.md
index e479a2d..6e65bff 100644
--- a/api.md
+++ b/api.md
@@ -58,7 +58,7 @@ Methods:
- client.agents.create(\*\*params) -> CreateAgentOutput
- client.agents.update(agent_id, \*\*params) -> object
-- client.agents.list(\*\*params) -> SyncAgentsPage[Agent]
+- client.agents.list(\*\*params) -> SyncPage[Agent]
- client.agents.delete(agent_id) -> object
- client.agents.metadata(agent_id) -> AgentMetadata
diff --git a/src/contextual/pagination.py b/src/contextual/pagination.py
index dbe57c8..44d93fc 100644
--- a/src/contextual/pagination.py
+++ b/src/contextual/pagination.py
@@ -10,8 +10,8 @@
"AsyncDatastoresPage",
"SyncDocumentsPage",
"AsyncDocumentsPage",
- "SyncAgentsPage",
- "AsyncAgentsPage",
+ "SyncPage",
+ "AsyncPage",
]
_T = TypeVar("_T")
@@ -97,7 +97,7 @@ def next_page_info(self) -> Optional[PageInfo]:
return PageInfo(params={"cursor": next_cursor})
-class SyncAgentsPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
+class SyncPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
agents: List[_T]
next_cursor: Optional[str] = None
@@ -117,7 +117,7 @@ def next_page_info(self) -> Optional[PageInfo]:
return PageInfo(params={"cursor": next_cursor})
-class AsyncAgentsPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
+class AsyncPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
agents: List[_T]
next_cursor: Optional[str] = None
diff --git a/src/contextual/resources/agents/agents.py b/src/contextual/resources/agents/agents.py
index bea8eda..e662b69 100644
--- a/src/contextual/resources/agents/agents.py
+++ b/src/contextual/resources/agents/agents.py
@@ -36,7 +36,7 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from ...pagination import SyncAgentsPage, AsyncAgentsPage
+from ...pagination import SyncPage, AsyncPage
from ...types.agent import Agent
from ..._base_client import AsyncPaginator, make_request_options
from .datasets.datasets import (
@@ -242,7 +242,7 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SyncAgentsPage[Agent]:
+ ) -> SyncPage[Agent]:
"""
Retrieve a list of all `Agents`.
@@ -262,7 +262,7 @@ def list(
"""
return self._get_api_list(
"/agents",
- page=SyncAgentsPage[Agent],
+ page=SyncPage[Agent],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -536,7 +536,7 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncPaginator[Agent, AsyncAgentsPage[Agent]]:
+ ) -> AsyncPaginator[Agent, AsyncPage[Agent]]:
"""
Retrieve a list of all `Agents`.
@@ -556,7 +556,7 @@ def list(
"""
return self._get_api_list(
"/agents",
- page=AsyncAgentsPage[Agent],
+ page=AsyncPage[Agent],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py
index adb73a6..30eb7db 100644
--- a/tests/api_resources/test_agents.py
+++ b/tests/api_resources/test_agents.py
@@ -14,7 +14,7 @@
AgentMetadata,
CreateAgentOutput,
)
-from contextual.pagination import SyncAgentsPage, AsyncAgentsPage
+from contextual.pagination import SyncPage, AsyncPage
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -116,7 +116,7 @@ def test_path_params_update(self, client: ContextualAI) -> None:
@parametrize
def test_method_list(self, client: ContextualAI) -> None:
agent = client.agents.list()
- assert_matches_type(SyncAgentsPage[Agent], agent, path=["response"])
+ assert_matches_type(SyncPage[Agent], agent, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: ContextualAI) -> None:
@@ -124,7 +124,7 @@ def test_method_list_with_all_params(self, client: ContextualAI) -> None:
cursor="cursor",
limit=1,
)
- assert_matches_type(SyncAgentsPage[Agent], agent, path=["response"])
+ assert_matches_type(SyncPage[Agent], agent, path=["response"])
@parametrize
def test_raw_response_list(self, client: ContextualAI) -> None:
@@ -133,7 +133,7 @@ def test_raw_response_list(self, client: ContextualAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = response.parse()
- assert_matches_type(SyncAgentsPage[Agent], agent, path=["response"])
+ assert_matches_type(SyncPage[Agent], agent, path=["response"])
@parametrize
def test_streaming_response_list(self, client: ContextualAI) -> None:
@@ -142,7 +142,7 @@ def test_streaming_response_list(self, client: ContextualAI) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = response.parse()
- assert_matches_type(SyncAgentsPage[Agent], agent, path=["response"])
+ assert_matches_type(SyncPage[Agent], agent, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -320,7 +320,7 @@ async def test_path_params_update(self, async_client: AsyncContextualAI) -> None
@parametrize
async def test_method_list(self, async_client: AsyncContextualAI) -> None:
agent = await async_client.agents.list()
- assert_matches_type(AsyncAgentsPage[Agent], agent, path=["response"])
+ assert_matches_type(AsyncPage[Agent], agent, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncContextualAI) -> None:
@@ -328,7 +328,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncContextualAI
cursor="cursor",
limit=1,
)
- assert_matches_type(AsyncAgentsPage[Agent], agent, path=["response"])
+ assert_matches_type(AsyncPage[Agent], agent, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None:
@@ -337,7 +337,7 @@ async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = await response.parse()
- assert_matches_type(AsyncAgentsPage[Agent], agent, path=["response"])
+ assert_matches_type(AsyncPage[Agent], agent, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncContextualAI) -> None:
@@ -346,7 +346,7 @@ async def test_streaming_response_list(self, async_client: AsyncContextualAI) ->
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
agent = await response.parse()
- assert_matches_type(AsyncAgentsPage[Agent], agent, path=["response"])
+ assert_matches_type(AsyncPage[Agent], agent, path=["response"])
assert cast(Any, response.is_closed) is True
From 264f896ebca57263a5e87950d1d2a51c5687886e Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 15 Jan 2025 09:06:56 +0000
Subject: [PATCH 4/4] release: 0.1.0-alpha.2
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 10 ++++++++++
pyproject.toml | 2 +-
src/contextual/_version.py | 2 +-
4 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index ba6c348..f14b480 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.1.0-alpha.1"
+ ".": "0.1.0-alpha.2"
}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6add48b..006cd6b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,15 @@
# Changelog
+## 0.1.0-alpha.2 (2025-01-15)
+
+Full Changelog: [v0.1.0-alpha.1...v0.1.0-alpha.2](https://github.com/ContextualAI/contextual-client-python/compare/v0.1.0-alpha.1...v0.1.0-alpha.2)
+
+### Features
+
+* **api:** update via SDK Studio ([#16](https://github.com/ContextualAI/contextual-client-python/issues/16)) ([eef8a87](https://github.com/ContextualAI/contextual-client-python/commit/eef8a87c1f4d1c57fce697103d07c8510fcc4520))
+* **api:** update via SDK Studio ([#18](https://github.com/ContextualAI/contextual-client-python/issues/18)) ([990c359](https://github.com/ContextualAI/contextual-client-python/commit/990c359ab3f2e6c3f29fc07e158c895159e8cc94))
+* **api:** update via SDK Studio ([#19](https://github.com/ContextualAI/contextual-client-python/issues/19)) ([4eeaea9](https://github.com/ContextualAI/contextual-client-python/commit/4eeaea95542c416d4dfa0d00e0304c3f88c2be79))
+
## 0.1.0-alpha.1 (2025-01-15)
Full Changelog: [v0.0.1-alpha.0...v0.1.0-alpha.1](https://github.com/ContextualAI/contextual-client-python/compare/v0.0.1-alpha.0...v0.1.0-alpha.1)
diff --git a/pyproject.toml b/pyproject.toml
index 7b4bd09..a047194 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "contextual-client"
-version = "0.1.0-alpha.1"
+version = "0.1.0-alpha.2"
description = "The official Python library for the Contextual AI API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/contextual/_version.py b/src/contextual/_version.py
index 68871a1..ba837ba 100644
--- a/src/contextual/_version.py
+++ b/src/contextual/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "contextual"
-__version__ = "0.1.0-alpha.1" # x-release-please-version
+__version__ = "0.1.0-alpha.2" # x-release-please-version