diff --git a/.stats.yml b/.stats.yml
index 53f3a7c9..a8e67cf3 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
-configured_endpoints: 35
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-d79ccb778953ad5c2ae4b99115429c8b3f68b3b23d9b6d90b1b40393f11a4383.yml
+configured_endpoints: 46
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-5298551c424bb999f258bdd6c311e96c80c70701ad59bbce19b46c788ee13bd4.yml
diff --git a/api.md b/api.md
index 582dfe73..d35fcb6f 100644
--- a/api.md
+++ b/api.md
@@ -38,6 +38,7 @@ Methods:
- client.datastores.documents.delete(document_id, \*, datastore_id) -> object
- client.datastores.documents.ingest(datastore_id, \*\*params) -> IngestionResponse
- client.datastores.documents.metadata(document_id, \*, datastore_id) -> DocumentMetadata
+- client.datastores.documents.set_metadata(document_id, \*, datastore_id, \*\*params) -> DocumentMetadata
# Agents
@@ -120,6 +121,23 @@ Types:
from contextual.types.agents import CreateDatasetResponse, DatasetMetadata, ListDatasetsResponse
```
+### Tune
+
+Types:
+
+```python
+from contextual.types.agents.datasets import TuneDeleteResponse
+```
+
+Methods:
+
+- client.agents.datasets.tune.create(agent_id, \*\*params) -> CreateDatasetResponse
+- client.agents.datasets.tune.retrieve(dataset_name, \*, agent_id, \*\*params) -> BinaryAPIResponse
+- client.agents.datasets.tune.update(dataset_name, \*, agent_id, \*\*params) -> CreateDatasetResponse
+- client.agents.datasets.tune.list(agent_id, \*\*params) -> ListDatasetsResponse
+- client.agents.datasets.tune.delete(dataset_name, \*, agent_id) -> object
+- client.agents.datasets.tune.metadata(dataset_name, \*, agent_id, \*\*params) -> DatasetMetadata
+
### Evaluate
Types:
@@ -175,6 +193,27 @@ Methods:
- client.agents.tune.models.list(agent_id) -> ListTuneModelsResponse
+# Users
+
+Types:
+
+```python
+from contextual.types import (
+ InviteUsersResponse,
+ ListUsersResponse,
+ NewUser,
+ UserUpdateResponse,
+ UserDeactivateResponse,
+)
+```
+
+Methods:
+
+- client.users.update(\*\*params) -> object
+- client.users.list(\*\*params) -> ListUsersResponse
+- client.users.deactivate(\*\*params) -> object
+- client.users.invite(\*\*params) -> InviteUsersResponse
+
# LMUnit
Types:
diff --git a/src/contextual/_client.py b/src/contextual/_client.py
index e427da04..53dd26ac 100644
--- a/src/contextual/_client.py
+++ b/src/contextual/_client.py
@@ -24,7 +24,7 @@
get_async_library,
)
from ._version import __version__
-from .resources import lmunit, rerank, generate
+from .resources import users, lmunit, rerank, generate
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
from ._exceptions import APIStatusError, ContextualAIError
from ._base_client import (
@@ -50,6 +50,7 @@
class ContextualAI(SyncAPIClient):
datastores: datastores.DatastoresResource
agents: agents.AgentsResource
+ users: users.UsersResource
lmunit: lmunit.LMUnitResource
rerank: rerank.RerankResource
generate: generate.GenerateResource
@@ -112,6 +113,7 @@ def __init__(
self.datastores = datastores.DatastoresResource(self)
self.agents = agents.AgentsResource(self)
+ self.users = users.UsersResource(self)
self.lmunit = lmunit.LMUnitResource(self)
self.rerank = rerank.RerankResource(self)
self.generate = generate.GenerateResource(self)
@@ -226,6 +228,7 @@ def _make_status_error(
class AsyncContextualAI(AsyncAPIClient):
datastores: datastores.AsyncDatastoresResource
agents: agents.AsyncAgentsResource
+ users: users.AsyncUsersResource
lmunit: lmunit.AsyncLMUnitResource
rerank: rerank.AsyncRerankResource
generate: generate.AsyncGenerateResource
@@ -288,6 +291,7 @@ def __init__(
self.datastores = datastores.AsyncDatastoresResource(self)
self.agents = agents.AsyncAgentsResource(self)
+ self.users = users.AsyncUsersResource(self)
self.lmunit = lmunit.AsyncLMUnitResource(self)
self.rerank = rerank.AsyncRerankResource(self)
self.generate = generate.AsyncGenerateResource(self)
@@ -403,6 +407,7 @@ class ContextualAIWithRawResponse:
def __init__(self, client: ContextualAI) -> None:
self.datastores = datastores.DatastoresResourceWithRawResponse(client.datastores)
self.agents = agents.AgentsResourceWithRawResponse(client.agents)
+ self.users = users.UsersResourceWithRawResponse(client.users)
self.lmunit = lmunit.LMUnitResourceWithRawResponse(client.lmunit)
self.rerank = rerank.RerankResourceWithRawResponse(client.rerank)
self.generate = generate.GenerateResourceWithRawResponse(client.generate)
@@ -412,6 +417,7 @@ class AsyncContextualAIWithRawResponse:
def __init__(self, client: AsyncContextualAI) -> None:
self.datastores = datastores.AsyncDatastoresResourceWithRawResponse(client.datastores)
self.agents = agents.AsyncAgentsResourceWithRawResponse(client.agents)
+ self.users = users.AsyncUsersResourceWithRawResponse(client.users)
self.lmunit = lmunit.AsyncLMUnitResourceWithRawResponse(client.lmunit)
self.rerank = rerank.AsyncRerankResourceWithRawResponse(client.rerank)
self.generate = generate.AsyncGenerateResourceWithRawResponse(client.generate)
@@ -421,6 +427,7 @@ class ContextualAIWithStreamedResponse:
def __init__(self, client: ContextualAI) -> None:
self.datastores = datastores.DatastoresResourceWithStreamingResponse(client.datastores)
self.agents = agents.AgentsResourceWithStreamingResponse(client.agents)
+ self.users = users.UsersResourceWithStreamingResponse(client.users)
self.lmunit = lmunit.LMUnitResourceWithStreamingResponse(client.lmunit)
self.rerank = rerank.RerankResourceWithStreamingResponse(client.rerank)
self.generate = generate.GenerateResourceWithStreamingResponse(client.generate)
@@ -430,6 +437,7 @@ class AsyncContextualAIWithStreamedResponse:
def __init__(self, client: AsyncContextualAI) -> None:
self.datastores = datastores.AsyncDatastoresResourceWithStreamingResponse(client.datastores)
self.agents = agents.AsyncAgentsResourceWithStreamingResponse(client.agents)
+ self.users = users.AsyncUsersResourceWithStreamingResponse(client.users)
self.lmunit = lmunit.AsyncLMUnitResourceWithStreamingResponse(client.lmunit)
self.rerank = rerank.AsyncRerankResourceWithStreamingResponse(client.rerank)
self.generate = generate.AsyncGenerateResourceWithStreamingResponse(client.generate)
diff --git a/src/contextual/resources/__init__.py b/src/contextual/resources/__init__.py
index a46db872..02594ebc 100644
--- a/src/contextual/resources/__init__.py
+++ b/src/contextual/resources/__init__.py
@@ -1,5 +1,13 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from .users import (
+ UsersResource,
+ AsyncUsersResource,
+ UsersResourceWithRawResponse,
+ AsyncUsersResourceWithRawResponse,
+ UsersResourceWithStreamingResponse,
+ AsyncUsersResourceWithStreamingResponse,
+)
from .agents import (
AgentsResource,
AsyncAgentsResource,
@@ -54,6 +62,12 @@
"AsyncAgentsResourceWithRawResponse",
"AgentsResourceWithStreamingResponse",
"AsyncAgentsResourceWithStreamingResponse",
+ "UsersResource",
+ "AsyncUsersResource",
+ "UsersResourceWithRawResponse",
+ "AsyncUsersResourceWithRawResponse",
+ "UsersResourceWithStreamingResponse",
+ "AsyncUsersResourceWithStreamingResponse",
"LMUnitResource",
"AsyncLMUnitResource",
"LMUnitResourceWithRawResponse",
diff --git a/src/contextual/resources/agents/agents.py b/src/contextual/resources/agents/agents.py
index 8696a9e3..72a291c1 100644
--- a/src/contextual/resources/agents/agents.py
+++ b/src/contextual/resources/agents/agents.py
@@ -101,6 +101,7 @@ def create(
self,
*,
name: str,
+ agent_configs: agent_create_params.AgentConfigs | NotGiven = NOT_GIVEN,
datastore_ids: List[str] | NotGiven = NOT_GIVEN,
description: str | NotGiven = NOT_GIVEN,
suggested_queries: List[str] | NotGiven = NOT_GIVEN,
@@ -128,8 +129,9 @@ def create(
Args:
name: Name of the agent
- datastore_ids: The IDs of the datastore associated with the agent. Leave empty to automatically
- create a new datastore.
+ agent_configs: The following advanced parameters are experimental and subject to change.
+
+ datastore_ids: The IDs of the datastore to associate with this agent.
description: Description of the agent
@@ -154,6 +156,7 @@ def create(
body=maybe_transform(
{
"name": name,
+ "agent_configs": agent_configs,
"datastore_ids": datastore_ids,
"description": description,
"suggested_queries": suggested_queries,
@@ -171,6 +174,7 @@ def update(
self,
agent_id: str,
*,
+ agent_configs: agent_update_params.AgentConfigs | NotGiven = NOT_GIVEN,
datastore_ids: List[str] | NotGiven = NOT_GIVEN,
llm_model_id: str | NotGiven = NOT_GIVEN,
suggested_queries: List[str] | NotGiven = NOT_GIVEN,
@@ -190,6 +194,8 @@ def update(
Args:
agent_id: ID of the agent to edit
+ agent_configs: The following advanced parameters are experimental and subject to change.
+
datastore_ids: IDs of the datastore to associate with the agent.
llm_model_id: The model ID to use for generation. Tuned models can only be used for the agents
@@ -218,6 +224,7 @@ def update(
f"/agents/{agent_id}",
body=maybe_transform(
{
+ "agent_configs": agent_configs,
"datastore_ids": datastore_ids,
"llm_model_id": llm_model_id,
"suggested_queries": suggested_queries,
@@ -395,6 +402,7 @@ async def create(
self,
*,
name: str,
+ agent_configs: agent_create_params.AgentConfigs | NotGiven = NOT_GIVEN,
datastore_ids: List[str] | NotGiven = NOT_GIVEN,
description: str | NotGiven = NOT_GIVEN,
suggested_queries: List[str] | NotGiven = NOT_GIVEN,
@@ -422,8 +430,9 @@ async def create(
Args:
name: Name of the agent
- datastore_ids: The IDs of the datastore associated with the agent. Leave empty to automatically
- create a new datastore.
+ agent_configs: The following advanced parameters are experimental and subject to change.
+
+ datastore_ids: The IDs of the datastore to associate with this agent.
description: Description of the agent
@@ -448,6 +457,7 @@ async def create(
body=await async_maybe_transform(
{
"name": name,
+ "agent_configs": agent_configs,
"datastore_ids": datastore_ids,
"description": description,
"suggested_queries": suggested_queries,
@@ -465,6 +475,7 @@ async def update(
self,
agent_id: str,
*,
+ agent_configs: agent_update_params.AgentConfigs | NotGiven = NOT_GIVEN,
datastore_ids: List[str] | NotGiven = NOT_GIVEN,
llm_model_id: str | NotGiven = NOT_GIVEN,
suggested_queries: List[str] | NotGiven = NOT_GIVEN,
@@ -484,6 +495,8 @@ async def update(
Args:
agent_id: ID of the agent to edit
+ agent_configs: The following advanced parameters are experimental and subject to change.
+
datastore_ids: IDs of the datastore to associate with the agent.
llm_model_id: The model ID to use for generation. Tuned models can only be used for the agents
@@ -512,6 +525,7 @@ async def update(
f"/agents/{agent_id}",
body=await async_maybe_transform(
{
+ "agent_configs": agent_configs,
"datastore_ids": datastore_ids,
"llm_model_id": llm_model_id,
"suggested_queries": suggested_queries,
diff --git a/src/contextual/resources/agents/datasets/__init__.py b/src/contextual/resources/agents/datasets/__init__.py
index c97b011b..059bd75f 100644
--- a/src/contextual/resources/agents/datasets/__init__.py
+++ b/src/contextual/resources/agents/datasets/__init__.py
@@ -1,5 +1,13 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from .tune import (
+ TuneResource,
+ AsyncTuneResource,
+ TuneResourceWithRawResponse,
+ AsyncTuneResourceWithRawResponse,
+ TuneResourceWithStreamingResponse,
+ AsyncTuneResourceWithStreamingResponse,
+)
from .datasets import (
DatasetsResource,
AsyncDatasetsResource,
@@ -18,6 +26,12 @@
)
__all__ = [
+ "TuneResource",
+ "AsyncTuneResource",
+ "TuneResourceWithRawResponse",
+ "AsyncTuneResourceWithRawResponse",
+ "TuneResourceWithStreamingResponse",
+ "AsyncTuneResourceWithStreamingResponse",
"EvaluateResource",
"AsyncEvaluateResource",
"EvaluateResourceWithRawResponse",
diff --git a/src/contextual/resources/agents/datasets/datasets.py b/src/contextual/resources/agents/datasets/datasets.py
index c07ea914..8b3610d2 100644
--- a/src/contextual/resources/agents/datasets/datasets.py
+++ b/src/contextual/resources/agents/datasets/datasets.py
@@ -2,6 +2,14 @@
from __future__ import annotations
+from .tune import (
+ TuneResource,
+ AsyncTuneResource,
+ TuneResourceWithRawResponse,
+ AsyncTuneResourceWithRawResponse,
+ TuneResourceWithStreamingResponse,
+ AsyncTuneResourceWithStreamingResponse,
+)
from .evaluate import (
EvaluateResource,
AsyncEvaluateResource,
@@ -17,6 +25,10 @@
class DatasetsResource(SyncAPIResource):
+ @cached_property
+ def tune(self) -> TuneResource:
+ return TuneResource(self._client)
+
@cached_property
def evaluate(self) -> EvaluateResource:
return EvaluateResource(self._client)
@@ -42,6 +54,10 @@ def with_streaming_response(self) -> DatasetsResourceWithStreamingResponse:
class AsyncDatasetsResource(AsyncAPIResource):
+ @cached_property
+ def tune(self) -> AsyncTuneResource:
+ return AsyncTuneResource(self._client)
+
@cached_property
def evaluate(self) -> AsyncEvaluateResource:
return AsyncEvaluateResource(self._client)
@@ -70,6 +86,10 @@ class DatasetsResourceWithRawResponse:
def __init__(self, datasets: DatasetsResource) -> None:
self._datasets = datasets
+ @cached_property
+ def tune(self) -> TuneResourceWithRawResponse:
+ return TuneResourceWithRawResponse(self._datasets.tune)
+
@cached_property
def evaluate(self) -> EvaluateResourceWithRawResponse:
return EvaluateResourceWithRawResponse(self._datasets.evaluate)
@@ -79,6 +99,10 @@ class AsyncDatasetsResourceWithRawResponse:
def __init__(self, datasets: AsyncDatasetsResource) -> None:
self._datasets = datasets
+ @cached_property
+ def tune(self) -> AsyncTuneResourceWithRawResponse:
+ return AsyncTuneResourceWithRawResponse(self._datasets.tune)
+
@cached_property
def evaluate(self) -> AsyncEvaluateResourceWithRawResponse:
return AsyncEvaluateResourceWithRawResponse(self._datasets.evaluate)
@@ -88,6 +112,10 @@ class DatasetsResourceWithStreamingResponse:
def __init__(self, datasets: DatasetsResource) -> None:
self._datasets = datasets
+ @cached_property
+ def tune(self) -> TuneResourceWithStreamingResponse:
+ return TuneResourceWithStreamingResponse(self._datasets.tune)
+
@cached_property
def evaluate(self) -> EvaluateResourceWithStreamingResponse:
return EvaluateResourceWithStreamingResponse(self._datasets.evaluate)
@@ -97,6 +125,10 @@ class AsyncDatasetsResourceWithStreamingResponse:
def __init__(self, datasets: AsyncDatasetsResource) -> None:
self._datasets = datasets
+ @cached_property
+ def tune(self) -> AsyncTuneResourceWithStreamingResponse:
+ return AsyncTuneResourceWithStreamingResponse(self._datasets.tune)
+
@cached_property
def evaluate(self) -> AsyncEvaluateResourceWithStreamingResponse:
return AsyncEvaluateResourceWithStreamingResponse(self._datasets.evaluate)
diff --git a/src/contextual/resources/agents/datasets/tune.py b/src/contextual/resources/agents/datasets/tune.py
new file mode 100644
index 00000000..1a468bbb
--- /dev/null
+++ b/src/contextual/resources/agents/datasets/tune.py
@@ -0,0 +1,939 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Mapping, cast
+from typing_extensions import Literal
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from ...._utils import (
+ extract_files,
+ maybe_transform,
+ deepcopy_minimal,
+ async_maybe_transform,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ BinaryAPIResponse,
+ AsyncBinaryAPIResponse,
+ StreamedBinaryAPIResponse,
+ AsyncStreamedBinaryAPIResponse,
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ to_custom_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+ to_custom_streamed_response_wrapper,
+ async_to_custom_raw_response_wrapper,
+ async_to_custom_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.agents.datasets import (
+ tune_list_params,
+ tune_create_params,
+ tune_update_params,
+ tune_metadata_params,
+ tune_retrieve_params,
+)
+from ....types.agents.dataset_metadata import DatasetMetadata
+from ....types.agents.list_datasets_response import ListDatasetsResponse
+from ....types.agents.create_dataset_response import CreateDatasetResponse
+
+__all__ = ["TuneResource", "AsyncTuneResource"]
+
+
+class TuneResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> TuneResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers
+ """
+ return TuneResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> TuneResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response
+ """
+ return TuneResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ agent_id: str,
+ *,
+ dataset_name: str,
+ dataset_type: Literal["tuning_set"],
+ file: FileTypes,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> CreateDatasetResponse:
+ """
+ Create a new tuning `Dataset` for the specified `Agent` using the provided JSONL
+ or CSV file. A `Dataset` is a versioned collection of samples conforming to a
+ particular schema, and can be used as a source of training and test data for
+ tuning jobs.
+
+ Each `Dataset` is versioned and validated against its schema during creation and
+ subsequent updates. The provided `Dataset` file must conform to the schema
+ defined for the `dataset_type`.
+
+ File schema for `dataset_type` `tuning_set` is a CSV file or a JSONL file where
+ each line is one JSON object. The following keys are required:
+
+ - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
+ answer. `knowledge` is a list of retrieved text chunks.
+
+ - `reference` (`str`): The gold-standard answer to the prompt.
+
+ - `guideline` (`str`): Guidelines for model output. If you do not have special
+ guidelines for the model's output, you can use the `System Prompt` defined in
+ your Agent configuration as the `guideline`.
+
+ - `prompt` (`str`): Question for the model to respond to.
+
+ For examples of what `tuning_set` should look like, check out our
+ `Tune & Evaluation Guide`.
+
+ Args:
+ agent_id: Agent ID to associate with the tune dataset
+
+ dataset_name: Name of the tune dataset
+
+ dataset_type: Type of tune dataset which determines its schema and validation rules.
+
+ file: JSONL or CSV file containing the tune dataset
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ body = deepcopy_minimal(
+ {
+ "dataset_name": dataset_name,
+ "dataset_type": dataset_type,
+ "file": file,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return self._post(
+ f"/agents/{agent_id}/datasets/tune",
+ body=maybe_transform(body, tune_create_params.TuneCreateParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=CreateDatasetResponse,
+ )
+
+ def retrieve(
+ self,
+ dataset_name: str,
+ *,
+ agent_id: str,
+ batch_size: int | NotGiven = NOT_GIVEN,
+ version: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BinaryAPIResponse:
+ """Stream the raw content of a tuning `Dataset` version.
+
+ If no version is
+ specified, the latest version is used.
+
+ The `Dataset` content is downloaded in batches. Batch size can be configured to
+ meet specific processing requirements.
+
+ Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content with:
+
+ - Content-Type: application/octet-stream
+
+ - Content-Disposition: attachment
+
+ - Chunked transfer encoding
+
+ Args:
+ agent_id: Agent ID associated with the tune dataset
+
+ dataset_name: Name of the tune dataset to retrieve
+
+ batch_size: Batch size for processing
+
+ version: Version number of the tune dataset to retrieve. Defaults to the latest version
+ if not specified.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ if not dataset_name:
+ raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
+ extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
+ return self._get(
+ f"/agents/{agent_id}/datasets/tune/{dataset_name}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "batch_size": batch_size,
+ "version": version,
+ },
+ tune_retrieve_params.TuneRetrieveParams,
+ ),
+ ),
+ cast_to=BinaryAPIResponse,
+ )
+
+ def update(
+ self,
+ dataset_name: str,
+ *,
+ agent_id: str,
+ dataset_type: Literal["tuning_set"],
+ file: FileTypes,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> CreateDatasetResponse:
+ """
+ Append to an existing tuning `Dataset`.
+
+ Create a new version of the dataset by appending content to the `Dataset` and
+ validating against its schema.
+
+        File schema for `dataset_type` `tuning_set` is a CSV file or a JSONL file
+ where each line is one JSON object. The following keys are required:
+
+ - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
+ answer. `knowledge` is a list of retrieved text chunks.
+
+ - `reference` (`str`): The gold-standard answer to the prompt.
+
+ - `guideline` (`str`): Guidelines for model output. If you do not have special
+ guidelines for the model's output, you can use the `System Prompt` defined in
+ your Agent configuration as the `guideline`.
+
+ - `prompt` (`str`): Question for the model to respond to.
+
+ For examples of what `tuning_set` should look like, check out our
+ `Tune & Evaluation Guide`.
+
+ Args:
+ agent_id: Agent ID associated with the tune dataset
+
+ dataset_name: Name of the tune dataset to append to
+
+ dataset_type: Type of tune dataset which determines its schema and validation rules. Must
+ match the `dataset_type` used at dataset creation time.
+
+ file: JSONL or CSV file containing the entries to append to the tune dataset
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ if not dataset_name:
+ raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
+ body = deepcopy_minimal(
+ {
+ "dataset_type": dataset_type,
+ "file": file,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return self._put(
+ f"/agents/{agent_id}/datasets/tune/{dataset_name}",
+ body=maybe_transform(body, tune_update_params.TuneUpdateParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=CreateDatasetResponse,
+ )
+
+ def list(
+ self,
+ agent_id: str,
+ *,
+ dataset_name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ListDatasetsResponse:
+ """
+ List all tuning `Datasets` and their versions belonging to a particular `Agent`.
+
+ If a `dataset_name` filter is provided, all versions of that `Dataset` will be
+ listed.
+
+ Includes metadata and schema for each `Dataset` version.
+
+ Args:
+          agent_id: Agent ID for which to list associated tune datasets
+
+ dataset_name: Optional dataset name to filter the results by. If provided, only versions from
+ that dataset are listed.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ return self._get(
+ f"/agents/{agent_id}/datasets/tune",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"dataset_name": dataset_name}, tune_list_params.TuneListParams),
+ ),
+ cast_to=ListDatasetsResponse,
+ )
+
+ def delete(
+ self,
+ dataset_name: str,
+ *,
+ agent_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> object:
+ """
+ Delete a tuning `Dataset` and all its versions.
+
+ Permanently removes the `Dataset`, including all associated metadata.
+
+ This operation is irreversible.
+
+ Args:
+ agent_id: Agent ID associated with the tune dataset
+
+ dataset_name: Name of the tune dataset to delete
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ if not dataset_name:
+ raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
+ return self._delete(
+ f"/agents/{agent_id}/datasets/tune/{dataset_name}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=object,
+ )
+
+ def metadata(
+ self,
+ dataset_name: str,
+ *,
+ agent_id: str,
+ version: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> DatasetMetadata:
+ """
+ Retrieve details of a specific tuning `Dataset` version, or the latest version
+ if no `version` is specified.
+
+ Provides comprehensive information about the `Dataset`, including its metadata
+ and schema.
+
+ Args:
+ agent_id: Agent ID associated with the tune dataset
+
+ dataset_name: Name of the tune dataset to retrieve details for
+
+ version: Version number of the dataset. Defaults to the latest version if not specified.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ if not dataset_name:
+ raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
+ return self._get(
+ f"/agents/{agent_id}/datasets/tune/{dataset_name}/metadata",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"version": version}, tune_metadata_params.TuneMetadataParams),
+ ),
+ cast_to=DatasetMetadata,
+ )
+
+
+class AsyncTuneResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncTuneResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncTuneResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncTuneResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response
+ """
+ return AsyncTuneResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ agent_id: str,
+ *,
+ dataset_name: str,
+ dataset_type: Literal["tuning_set"],
+ file: FileTypes,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> CreateDatasetResponse:
+ """
+ Create a new tuning `Dataset` for the specified `Agent` using the provided JSONL
+ or CSV file. A `Dataset` is a versioned collection of samples conforming to a
+ particular schema, and can be used as a source of training and test data for
+ tuning jobs.
+
+ Each `Dataset` is versioned and validated against its schema during creation and
+ subsequent updates. The provided `Dataset` file must conform to the schema
+ defined for the `dataset_type`.
+
+ File schema for `dataset_type` `tuning_set` is a CSV file or a JSONL file where
+ each line is one JSON object. The following keys are required:
+
+ - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
+ answer. `knowledge` is a list of retrieved text chunks.
+
+ - `reference` (`str`): The gold-standard answer to the prompt.
+
+ - `guideline` (`str`): Guidelines for model output. If you do not have special
+ guidelines for the model's output, you can use the `System Prompt` defined in
+ your Agent configuration as the `guideline`.
+
+ - `prompt` (`str`): Question for the model to respond to.
+
+ For examples of what `tuning_set` should look like, check out our
+ `Tune & Evaluation Guide`.
+
+ Args:
+ agent_id: Agent ID to associate with the tune dataset
+
+ dataset_name: Name of the tune dataset
+
+ dataset_type: Type of tune dataset which determines its schema and validation rules.
+
+ file: JSONL or CSV file containing the tune dataset
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ body = deepcopy_minimal(
+ {
+ "dataset_name": dataset_name,
+ "dataset_type": dataset_type,
+ "file": file,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._post(
+ f"/agents/{agent_id}/datasets/tune",
+ body=await async_maybe_transform(body, tune_create_params.TuneCreateParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=CreateDatasetResponse,
+ )
+
+ async def retrieve(
+ self,
+ dataset_name: str,
+ *,
+ agent_id: str,
+ batch_size: int | NotGiven = NOT_GIVEN,
+ version: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncBinaryAPIResponse:
+ """Stream the raw content of a tuning `Dataset` version.
+
+ If no version is
+ specified, the latest version is used.
+
+ The `Dataset` content is downloaded in batches. Batch size can be configured to
+ meet specific processing requirements.
+
+ Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content with:
+
+ - Content-Type: application/octet-stream
+
+ - Content-Disposition: attachment
+
+ - Chunked transfer encoding
+
+ Args:
+ agent_id: Agent ID associated with the tune dataset
+
+ dataset_name: Name of the tune dataset to retrieve
+
+ batch_size: Batch size for processing
+
+ version: Version number of the tune dataset to retrieve. Defaults to the latest version
+ if not specified.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ if not dataset_name:
+ raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
+ extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
+ return await self._get(
+ f"/agents/{agent_id}/datasets/tune/{dataset_name}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "batch_size": batch_size,
+ "version": version,
+ },
+ tune_retrieve_params.TuneRetrieveParams,
+ ),
+ ),
+ cast_to=AsyncBinaryAPIResponse,
+ )
+
+ async def update(
+ self,
+ dataset_name: str,
+ *,
+ agent_id: str,
+ dataset_type: Literal["tuning_set"],
+ file: FileTypes,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> CreateDatasetResponse:
+ """
+ Append to an existing tuning `Dataset`.
+
+ Create a new version of the dataset by appending content to the `Dataset` and
+ validating against its schema.
+
+        File schema for `dataset_type` `tuning_set` is a CSV file or a JSONL file
+ where each line is one JSON object. The following keys are required:
+
+ - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
+ answer. `knowledge` is a list of retrieved text chunks.
+
+ - `reference` (`str`): The gold-standard answer to the prompt.
+
+ - `guideline` (`str`): Guidelines for model output. If you do not have special
+ guidelines for the model's output, you can use the `System Prompt` defined in
+ your Agent configuration as the `guideline`.
+
+ - `prompt` (`str`): Question for the model to respond to.
+
+ For examples of what `tuning_set` should look like, check out our
+ `Tune & Evaluation Guide`.
+
+ Args:
+ agent_id: Agent ID associated with the tune dataset
+
+ dataset_name: Name of the tune dataset to append to
+
+ dataset_type: Type of tune dataset which determines its schema and validation rules. Must
+ match the `dataset_type` used at dataset creation time.
+
+ file: JSONL or CSV file containing the entries to append to the tune dataset
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ if not dataset_name:
+ raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
+ body = deepcopy_minimal(
+ {
+ "dataset_type": dataset_type,
+ "file": file,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._put(
+ f"/agents/{agent_id}/datasets/tune/{dataset_name}",
+ body=await async_maybe_transform(body, tune_update_params.TuneUpdateParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=CreateDatasetResponse,
+ )
+
+ async def list(
+ self,
+ agent_id: str,
+ *,
+ dataset_name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ListDatasetsResponse:
+ """
+ List all tuning `Datasets` and their versions belonging to a particular `Agent`.
+
+ If a `dataset_name` filter is provided, all versions of that `Dataset` will be
+ listed.
+
+ Includes metadata and schema for each `Dataset` version.
+
+ Args:
+            agent_id: Agent ID for which to list associated tune datasets
+
+ dataset_name: Optional dataset name to filter the results by. If provided, only versions from
+ that dataset are listed.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ return await self._get(
+ f"/agents/{agent_id}/datasets/tune",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"dataset_name": dataset_name}, tune_list_params.TuneListParams),
+ ),
+ cast_to=ListDatasetsResponse,
+ )
+
+ async def delete(
+ self,
+ dataset_name: str,
+ *,
+ agent_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> object:
+ """
+ Delete a tuning `Dataset` and all its versions.
+
+ Permanently removes the `Dataset`, including all associated metadata.
+
+ This operation is irreversible.
+
+ Args:
+ agent_id: Agent ID associated with the tune dataset
+
+ dataset_name: Name of the tune dataset to delete
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ if not dataset_name:
+ raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
+ return await self._delete(
+ f"/agents/{agent_id}/datasets/tune/{dataset_name}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=object,
+ )
+
+ async def metadata(
+ self,
+ dataset_name: str,
+ *,
+ agent_id: str,
+ version: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> DatasetMetadata:
+ """
+ Retrieve details of a specific tuning `Dataset` version, or the latest version
+ if no `version` is specified.
+
+ Provides comprehensive information about the `Dataset`, including its metadata
+ and schema.
+
+ Args:
+ agent_id: Agent ID associated with the tune dataset
+
+ dataset_name: Name of the tune dataset to retrieve details for
+
+ version: Version number of the dataset. Defaults to the latest version if not specified.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ if not dataset_name:
+ raise ValueError(f"Expected a non-empty value for `dataset_name` but received {dataset_name!r}")
+ return await self._get(
+ f"/agents/{agent_id}/datasets/tune/{dataset_name}/metadata",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"version": version}, tune_metadata_params.TuneMetadataParams),
+ ),
+ cast_to=DatasetMetadata,
+ )
+
+
+class TuneResourceWithRawResponse:
+ def __init__(self, tune: TuneResource) -> None:
+ self._tune = tune
+
+ self.create = to_raw_response_wrapper(
+ tune.create,
+ )
+ self.retrieve = to_custom_raw_response_wrapper(
+ tune.retrieve,
+ BinaryAPIResponse,
+ )
+ self.update = to_raw_response_wrapper(
+ tune.update,
+ )
+ self.list = to_raw_response_wrapper(
+ tune.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ tune.delete,
+ )
+ self.metadata = to_raw_response_wrapper(
+ tune.metadata,
+ )
+
+
+class AsyncTuneResourceWithRawResponse:
+ def __init__(self, tune: AsyncTuneResource) -> None:
+ self._tune = tune
+
+ self.create = async_to_raw_response_wrapper(
+ tune.create,
+ )
+ self.retrieve = async_to_custom_raw_response_wrapper(
+ tune.retrieve,
+ AsyncBinaryAPIResponse,
+ )
+ self.update = async_to_raw_response_wrapper(
+ tune.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ tune.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ tune.delete,
+ )
+ self.metadata = async_to_raw_response_wrapper(
+ tune.metadata,
+ )
+
+
+class TuneResourceWithStreamingResponse:
+ def __init__(self, tune: TuneResource) -> None:
+ self._tune = tune
+
+ self.create = to_streamed_response_wrapper(
+ tune.create,
+ )
+ self.retrieve = to_custom_streamed_response_wrapper(
+ tune.retrieve,
+ StreamedBinaryAPIResponse,
+ )
+ self.update = to_streamed_response_wrapper(
+ tune.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ tune.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ tune.delete,
+ )
+ self.metadata = to_streamed_response_wrapper(
+ tune.metadata,
+ )
+
+
+class AsyncTuneResourceWithStreamingResponse:
+ def __init__(self, tune: AsyncTuneResource) -> None:
+ self._tune = tune
+
+ self.create = async_to_streamed_response_wrapper(
+ tune.create,
+ )
+ self.retrieve = async_to_custom_streamed_response_wrapper(
+ tune.retrieve,
+ AsyncStreamedBinaryAPIResponse,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ tune.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ tune.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ tune.delete,
+ )
+ self.metadata = async_to_streamed_response_wrapper(
+ tune.metadata,
+ )
diff --git a/src/contextual/resources/agents/query.py b/src/contextual/resources/agents/query.py
index e7ec483c..7f6d9de0 100644
--- a/src/contextual/resources/agents/query.py
+++ b/src/contextual/resources/agents/query.py
@@ -83,10 +83,11 @@ def create(
multiple objects to provide conversation history. Last message in the list must
be a `user`-sent message (i.e. `role` equals `"user"`).
- include_retrieval_content_text: Ignored if `retrievals_only` is True. Set to `true` to include the text of the
- retrieved contents in the response. If `false`, only metadata about the
- retrieved contents will be included, not content text. Content text and other
- metadata can also be fetched separately using the
+ include_retrieval_content_text: Set to `true` to include the text of the retrieved contents in the response. If
+ `false`, only metadata about the retrieved contents will be included, not
+ content text. This parameter is ignored if `retrievals_only` is `true`, in which
+ case `content_text` will always be returned. Content text and other metadata can
+ also be fetched separately using the
`/agents/{agent_id}/query/{message_id}/retrieval/info` endpoint.
retrievals_only: Set to `true` to fetch retrieval content and metadata, and then skip generation
@@ -365,10 +366,11 @@ async def create(
multiple objects to provide conversation history. Last message in the list must
be a `user`-sent message (i.e. `role` equals `"user"`).
- include_retrieval_content_text: Ignored if `retrievals_only` is True. Set to `true` to include the text of the
- retrieved contents in the response. If `false`, only metadata about the
- retrieved contents will be included, not content text. Content text and other
- metadata can also be fetched separately using the
+ include_retrieval_content_text: Set to `true` to include the text of the retrieved contents in the response. If
+ `false`, only metadata about the retrieved contents will be included, not
+ content text. This parameter is ignored if `retrievals_only` is `true`, in which
+ case `content_text` will always be returned. Content text and other metadata can
+ also be fetched separately using the
`/agents/{agent_id}/query/{message_id}/retrieval/info` endpoint.
retrievals_only: Set to `true` to fetch retrieval content and metadata, and then skip generation
diff --git a/src/contextual/resources/agents/tune/jobs.py b/src/contextual/resources/agents/tune/jobs.py
index b8f324fa..52c25b9b 100644
--- a/src/contextual/resources/agents/tune/jobs.py
+++ b/src/contextual/resources/agents/tune/jobs.py
@@ -52,11 +52,10 @@ def list(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ListTuneJobsResponse:
"""
- Retrieve a list of all tune jobs run for a specified `Agent`, including their
- `status`, `evaluation_results`, and resultant `model_id`.
+ Retrieve a list of all fine-tuning jobs for a specified Agent.
Args:
- agent_id: ID of the agent to list tuning jobs for
+ agent_id: ID of the Agent to list tuning jobs for
extra_headers: Send extra headers
@@ -88,13 +87,13 @@ def delete(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> object:
- """Cancel a tuning job if it is still in progress.
+ """Cancel a specific fine-tuning job.
- If the tuning job has already
- completed, the tuned model will not be deleted.
+ Terminates the fine-tuning job if it is still
+ in progress.
Args:
- agent_id: ID of the agent associated with the tuning job
+ agent_id: ID of the Agent associated with the tuning job
job_id: ID of the tuning job to cancel
@@ -130,16 +129,19 @@ def metadata(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TuneJobMetadata:
- """
- Retrieve the status of a specific tuning job.
+ """Retrieve the status of a specific tuning job.
- After the tuning job is complete, the metadata associated with the tune job will
- include evaluation results and a model ID. You can then deploy the tuned model
- to the agent by editing its config with the tuned model ID and the "Edit Agent"
- API (i.e. the `PUT /agents/{agent_id}` API).
+ Fetches the current status and
+ evaluation results, if available, for the specified tuning job. After the tuning
+ job is complete, the metadata associated with the tune job will include
+ evaluation results and a model ID. You can then activate the tuned model for
+ your agent by editing its config with the tuned model ID and the "Edit Agent"
+ API (i.e. the `PUT /agents/{agent_id}` API). To deactivate the tuned model, you
+ will need to edit the Agent's config again and set the `llm_model_id` field to
+ "default". For an end-to-end walkthrough, see the `Tune & Evaluation Guide`.
Args:
- agent_id: ID of the agent associated with the tuning job
+ agent_id: ID of the Agent associated with the tuning job
job_id: ID of the tuning job to retrieve the status for
@@ -196,11 +198,10 @@ async def list(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ListTuneJobsResponse:
"""
- Retrieve a list of all tune jobs run for a specified `Agent`, including their
- `status`, `evaluation_results`, and resultant `model_id`.
+ Retrieve a list of all fine-tuning jobs for a specified Agent.
Args:
- agent_id: ID of the agent to list tuning jobs for
+ agent_id: ID of the Agent to list tuning jobs for
extra_headers: Send extra headers
@@ -232,13 +233,13 @@ async def delete(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> object:
- """Cancel a tuning job if it is still in progress.
+ """Cancel a specific fine-tuning job.
- If the tuning job has already
- completed, the tuned model will not be deleted.
+ Terminates the fine-tuning job if it is still
+ in progress.
Args:
- agent_id: ID of the agent associated with the tuning job
+ agent_id: ID of the Agent associated with the tuning job
job_id: ID of the tuning job to cancel
@@ -274,16 +275,19 @@ async def metadata(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TuneJobMetadata:
- """
- Retrieve the status of a specific tuning job.
+ """Retrieve the status of a specific tuning job.
- After the tuning job is complete, the metadata associated with the tune job will
- include evaluation results and a model ID. You can then deploy the tuned model
- to the agent by editing its config with the tuned model ID and the "Edit Agent"
- API (i.e. the `PUT /agents/{agent_id}` API).
+ Fetches the current status and
+ evaluation results, if available, for the specified tuning job. After the tuning
+ job is complete, the metadata associated with the tune job will include
+ evaluation results and a model ID. You can then activate the tuned model for
+ your agent by editing its config with the tuned model ID and the "Edit Agent"
+ API (i.e. the `PUT /agents/{agent_id}` API). To deactivate the tuned model, you
+ will need to edit the Agent's config again and set the `llm_model_id` field to
+ "default". For an end-to-end walkthrough, see the `Tune & Evaluation Guide`.
Args:
- agent_id: ID of the agent associated with the tuning job
+ agent_id: ID of the Agent associated with the tuning job
job_id: ID of the tuning job to retrieve the status for
diff --git a/src/contextual/resources/agents/tune/models.py b/src/contextual/resources/agents/tune/models.py
index c7ad6284..5b9b9171 100644
--- a/src/contextual/resources/agents/tune/models.py
+++ b/src/contextual/resources/agents/tune/models.py
@@ -51,10 +51,10 @@ def list(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ListTuneModelsResponse:
"""
- Retrieves a list of tuned models associated with the specified agent.
+ Retrieves a list of tuned models associated with the specified Agent.
Args:
- agent_id: ID of the agent from which to retrieve tuned models
+ agent_id: ID of the Agent from which to retrieve tuned models
extra_headers: Send extra headers
@@ -107,10 +107,10 @@ async def list(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ListTuneModelsResponse:
"""
- Retrieves a list of tuned models associated with the specified agent.
+ Retrieves a list of tuned models associated with the specified Agent.
Args:
- agent_id: ID of the agent from which to retrieve tuned models
+ agent_id: ID of the Agent from which to retrieve tuned models
extra_headers: Send extra headers
diff --git a/src/contextual/resources/agents/tune/tune.py b/src/contextual/resources/agents/tune/tune.py
index 83352b9e..0b4e9ead 100644
--- a/src/contextual/resources/agents/tune/tune.py
+++ b/src/contextual/resources/agents/tune/tune.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Mapping, cast
+from typing import Mapping, Optional, cast
import httpx
@@ -76,9 +76,10 @@ def create(
self,
agent_id: str,
*,
- training_file: FileTypes,
- model_id: str | NotGiven = NOT_GIVEN,
- test_file: FileTypes | NotGiven = NOT_GIVEN,
+ test_dataset_name: Optional[str] | NotGiven = NOT_GIVEN,
+ test_file: Optional[FileTypes] | NotGiven = NOT_GIVEN,
+ train_dataset_name: Optional[str] | NotGiven = NOT_GIVEN,
+ training_file: Optional[FileTypes] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -90,20 +91,40 @@ def create(
Create a tuning job for the specified `Agent` to specialize it to your specific
domain or use case.
- This API initiates an asynchronous tuning task using the provided
- `training_file` and an optional `test_file`. If no `test_file` is provided, the
- tuning job will hold out a portion of the `training_file` as the test set.
+ This API initiates an asynchronous tuning task. You can provide the required
+ data through one of two ways:
- Returns a tune job `id` which can be used to check on the status of your tuning
- task through the `GET /tune/jobs/{job_id}/metadata` endpoint.
+ - Provide a `training_file` and an optional `test_file`. If no `test_file` is
+ provided, a portion of the `training_file` will be held out as the test set.
+ For easy reusability, the `training_file` is automatically saved as a `Tuning`
+ `Dataset`, and the `test_file` as an `Evaluation` `Dataset`. You can manage
+ them via the `/datasets/tune` and `/datasets/evaluation` endpoints.
+
+ - Provide a `Tuning` `Dataset` and an optional `Evaluation` `Dataset`. You can
+ create a `Tuning` `Dataset` and `Evaluation` `Dataset` using the
+ `/datasets/tune` and `/datasets/evaluation` endpoints respectively.
+
+ The API returns a tune job `id` which can be used to check on the status of your
+ tuning task through the `GET /tune/jobs/{job_id}/metadata` endpoint.
After the tuning job is complete, the metadata associated with the tune job will
include evaluation results and a model ID. You can then deploy the tuned model
to the agent by editing its config with the tuned model ID and the "Edit Agent"
- API (i.e. the `PUT /agents/{agent_id}` API).
+ API (i.e. the `PUT /agents/{agent_id}` API). To deactivate the tuned model, you
+ will need to edit the Agent's config again and set the `llm_model_id` field to
+ "default". For an end-to-end walkthrough, see the `Tune & Evaluation Guide`.
Args:
- agent_id: ID of the agent to tune
+ agent_id: ID of the Agent to tune
+
+ test_dataset_name: Optional. `Dataset` to use for testing model checkpoints, created through the
+ `/datasets/evaluate` API.
+
+ test_file: Optional. Local path to the test data file. The test file should follow the same
+ format as the training data file.
+
+ train_dataset_name: `Dataset` to use for training, created through the `/datasets/tune` API. Either
+ `train_dataset_name` or `training_file` must be provided, but not both.
training_file: Local path to the training data file.
@@ -116,7 +137,9 @@ def create(
- `reference` (`str`): The gold-standard answer to the prompt.
- - `guideline` (`str`): Guidelines for model output.
+ - `guideline` (`str`): Guidelines for model output. If you do not have special
+ guidelines for the model's output, you can use the `System Prompt` defined in
+ your Agent configuration as the `guideline`.
- `prompt` (`str`): Question for the model to respond to.
@@ -138,12 +161,6 @@ def create(
]
```
- model_id: ID of an existing model to tune. Defaults to the agent's default model if not
- specified.
-
- test_file: Optional. Local path to the test data file. The test file should follow the same
- format as the training data file.
-
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -156,9 +173,10 @@ def create(
raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
body = deepcopy_minimal(
{
- "training_file": training_file,
- "model_id": model_id,
+ "test_dataset_name": test_dataset_name,
"test_file": test_file,
+ "train_dataset_name": train_dataset_name,
+ "training_file": training_file,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["training_file"], ["test_file"]])
@@ -209,9 +227,10 @@ async def create(
self,
agent_id: str,
*,
- training_file: FileTypes,
- model_id: str | NotGiven = NOT_GIVEN,
- test_file: FileTypes | NotGiven = NOT_GIVEN,
+ test_dataset_name: Optional[str] | NotGiven = NOT_GIVEN,
+ test_file: Optional[FileTypes] | NotGiven = NOT_GIVEN,
+ train_dataset_name: Optional[str] | NotGiven = NOT_GIVEN,
+ training_file: Optional[FileTypes] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -223,20 +242,40 @@ async def create(
Create a tuning job for the specified `Agent` to specialize it to your specific
domain or use case.
- This API initiates an asynchronous tuning task using the provided
- `training_file` and an optional `test_file`. If no `test_file` is provided, the
- tuning job will hold out a portion of the `training_file` as the test set.
+ This API initiates an asynchronous tuning task. You can provide the required
+ data through one of two ways:
- Returns a tune job `id` which can be used to check on the status of your tuning
- task through the `GET /tune/jobs/{job_id}/metadata` endpoint.
+ - Provide a `training_file` and an optional `test_file`. If no `test_file` is
+ provided, a portion of the `training_file` will be held out as the test set.
+ For easy reusability, the `training_file` is automatically saved as a `Tuning`
+ `Dataset`, and the `test_file` as an `Evaluation` `Dataset`. You can manage
+ them via the `/datasets/tune` and `/datasets/evaluation` endpoints.
+
+ - Provide a `Tuning` `Dataset` and an optional `Evaluation` `Dataset`. You can
+ create a `Tuning` `Dataset` and `Evaluation` `Dataset` using the
+ `/datasets/tune` and `/datasets/evaluation` endpoints respectively.
+
+ The API returns a tune job `id` which can be used to check on the status of your
+ tuning task through the `GET /tune/jobs/{job_id}/metadata` endpoint.
After the tuning job is complete, the metadata associated with the tune job will
include evaluation results and a model ID. You can then deploy the tuned model
to the agent by editing its config with the tuned model ID and the "Edit Agent"
- API (i.e. the `PUT /agents/{agent_id}` API).
+ API (i.e. the `PUT /agents/{agent_id}` API). To deactivate the tuned model, you
+ will need to edit the Agent's config again and set the `llm_model_id` field to
+ "default". For an end-to-end walkthrough, see the `Tune & Evaluation Guide`.
Args:
- agent_id: ID of the agent to tune
+ agent_id: ID of the Agent to tune
+
+ test_dataset_name: Optional. `Dataset` to use for testing model checkpoints, created through the
+ `/datasets/evaluate` API.
+
+ test_file: Optional. Local path to the test data file. The test file should follow the same
+ format as the training data file.
+
+ train_dataset_name: `Dataset` to use for training, created through the `/datasets/tune` API. Either
+ `train_dataset_name` or `training_file` must be provided, but not both.
training_file: Local path to the training data file.
@@ -249,7 +288,9 @@ async def create(
- `reference` (`str`): The gold-standard answer to the prompt.
- - `guideline` (`str`): Guidelines for model output.
+ - `guideline` (`str`): Guidelines for model output. If you do not have special
+ guidelines for the model's output, you can use the `System Prompt` defined in
+ your Agent configuration as the `guideline`.
- `prompt` (`str`): Question for the model to respond to.
@@ -271,12 +312,6 @@ async def create(
]
```
- model_id: ID of an existing model to tune. Defaults to the agent's default model if not
- specified.
-
- test_file: Optional. Local path to the test data file. The test file should follow the same
- format as the training data file.
-
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -289,9 +324,10 @@ async def create(
raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
body = deepcopy_minimal(
{
- "training_file": training_file,
- "model_id": model_id,
+ "test_dataset_name": test_dataset_name,
"test_file": test_file,
+ "train_dataset_name": train_dataset_name,
+ "training_file": training_file,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["training_file"], ["test_file"]])
diff --git a/src/contextual/resources/datastores/documents.py b/src/contextual/resources/datastores/documents.py
index 67b7ba6e..60dd9651 100644
--- a/src/contextual/resources/datastores/documents.py
+++ b/src/contextual/resources/datastores/documents.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import List, Union, Mapping, cast
+from typing import Dict, List, Union, Mapping, cast
from datetime import datetime
from typing_extensions import Literal
@@ -25,7 +25,7 @@
)
from ...pagination import SyncDocumentsPage, AsyncDocumentsPage
from ..._base_client import AsyncPaginator, make_request_options
-from ...types.datastores import document_list_params, document_ingest_params
+from ...types.datastores import document_list_params, document_ingest_params, document_set_metadata_params
from ...types.datastores.document_metadata import DocumentMetadata
from ...types.datastores.ingestion_response import IngestionResponse
@@ -171,6 +171,7 @@ def ingest(
datastore_id: str,
*,
file: FileTypes,
+ metadata: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -196,6 +197,12 @@ def ingest(
file: File to ingest
+ metadata: Metadata in `JSON` format. Metadata should be passed in a nested dictionary
+ structure of `str` metadata type to `Dict` mapping `str` metadata keys to `str`,
+ `bool`, `float` or `int` values. Currently, `custom_metadata` is the only
+          supported metadata type. Example `metadata` dictionary: {"metadata":
+          {"custom_metadata": {"customKey1": "value3", "\\__filterKey": "filterValue3"}}}
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -206,7 +213,12 @@ def ingest(
"""
if not datastore_id:
raise ValueError(f"Expected a non-empty value for `datastore_id` but received {datastore_id!r}")
- body = deepcopy_minimal({"file": file})
+ body = deepcopy_minimal(
+ {
+ "file": file,
+ "metadata": metadata,
+ }
+ )
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
@@ -263,6 +275,52 @@ def metadata(
cast_to=DocumentMetadata,
)
+ def set_metadata(
+ self,
+ document_id: str,
+ *,
+ datastore_id: str,
+ custom_metadata: Dict[str, Union[bool, float, str]] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> DocumentMetadata:
+ """
+ Post details of a given document that will enrich the chunk and be added to the
+        context or just for filtering. If just for filtering, start with "\\__" in the
+        key.
+
+ Args:
+          datastore_id: Datastore ID of the datastore containing the document
+
+          document_id: Document ID of the document to set metadata for
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not datastore_id:
+ raise ValueError(f"Expected a non-empty value for `datastore_id` but received {datastore_id!r}")
+ if not document_id:
+ raise ValueError(f"Expected a non-empty value for `document_id` but received {document_id!r}")
+ return self._post(
+ f"/datastores/{datastore_id}/documents/{document_id}/metadata",
+ body=maybe_transform(
+ {"custom_metadata": custom_metadata}, document_set_metadata_params.DocumentSetMetadataParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DocumentMetadata,
+ )
+
class AsyncDocumentsResource(AsyncAPIResource):
@cached_property
@@ -403,6 +461,7 @@ async def ingest(
datastore_id: str,
*,
file: FileTypes,
+ metadata: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -428,6 +487,12 @@ async def ingest(
file: File to ingest
+ metadata: Metadata in `JSON` format. Metadata should be passed in a nested dictionary
+ structure of `str` metadata type to `Dict` mapping `str` metadata keys to `str`,
+ `bool`, `float` or `int` values. Currently, `custom_metadata` is the only
+          supported metadata type. Example `metadata` dictionary: {"metadata":
+          {"custom_metadata": {"customKey1": "value3", "\\__filterKey": "filterValue3"}}}
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -438,7 +503,12 @@ async def ingest(
"""
if not datastore_id:
raise ValueError(f"Expected a non-empty value for `datastore_id` but received {datastore_id!r}")
- body = deepcopy_minimal({"file": file})
+ body = deepcopy_minimal(
+ {
+ "file": file,
+ "metadata": metadata,
+ }
+ )
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
@@ -495,6 +565,52 @@ async def metadata(
cast_to=DocumentMetadata,
)
+ async def set_metadata(
+ self,
+ document_id: str,
+ *,
+ datastore_id: str,
+ custom_metadata: Dict[str, Union[bool, float, str]] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> DocumentMetadata:
+ """
+ Post details of a given document that will enrich the chunk and be added to the
+        context or just for filtering. If just for filtering, start with "\\__" in the
+        key.
+
+ Args:
+          datastore_id: Datastore ID of the datastore containing the document
+
+          document_id: Document ID of the document to set metadata for
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not datastore_id:
+ raise ValueError(f"Expected a non-empty value for `datastore_id` but received {datastore_id!r}")
+ if not document_id:
+ raise ValueError(f"Expected a non-empty value for `document_id` but received {document_id!r}")
+ return await self._post(
+ f"/datastores/{datastore_id}/documents/{document_id}/metadata",
+ body=await async_maybe_transform(
+ {"custom_metadata": custom_metadata}, document_set_metadata_params.DocumentSetMetadataParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DocumentMetadata,
+ )
+
class DocumentsResourceWithRawResponse:
def __init__(self, documents: DocumentsResource) -> None:
@@ -512,6 +628,9 @@ def __init__(self, documents: DocumentsResource) -> None:
self.metadata = to_raw_response_wrapper(
documents.metadata,
)
+ self.set_metadata = to_raw_response_wrapper(
+ documents.set_metadata,
+ )
class AsyncDocumentsResourceWithRawResponse:
@@ -530,6 +649,9 @@ def __init__(self, documents: AsyncDocumentsResource) -> None:
self.metadata = async_to_raw_response_wrapper(
documents.metadata,
)
+ self.set_metadata = async_to_raw_response_wrapper(
+ documents.set_metadata,
+ )
class DocumentsResourceWithStreamingResponse:
@@ -548,6 +670,9 @@ def __init__(self, documents: DocumentsResource) -> None:
self.metadata = to_streamed_response_wrapper(
documents.metadata,
)
+ self.set_metadata = to_streamed_response_wrapper(
+ documents.set_metadata,
+ )
class AsyncDocumentsResourceWithStreamingResponse:
@@ -566,3 +691,6 @@ def __init__(self, documents: AsyncDocumentsResource) -> None:
self.metadata = async_to_streamed_response_wrapper(
documents.metadata,
)
+ self.set_metadata = async_to_streamed_response_wrapper(
+ documents.set_metadata,
+ )
diff --git a/src/contextual/resources/generate.py b/src/contextual/resources/generate.py
index 9a1beb6e..8d044f1e 100644
--- a/src/contextual/resources/generate.py
+++ b/src/contextual/resources/generate.py
@@ -52,6 +52,7 @@ def create(
knowledge: List[str],
messages: Iterable[generate_create_params.Message],
model: str,
+ avoid_commentary: bool | NotGiven = NOT_GIVEN,
system_prompt: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -65,7 +66,8 @@ def create(
engineered specifically to prioritize faithfulness to in-context retrievals over
parametric knowledge to reduce hallucinations in Retrieval-Augmented Generation.
- The total request cannot exceed 6,100 tokens.
+ The total request cannot exceed 32,000 tokens. Email glm-feedback@contextual.ai
+ with any feedback or questions.
Args:
knowledge: The knowledge sources the model can use when generating a response.
@@ -75,6 +77,12 @@ def create(
model: The version of the Contextual's GLM to use. Currently, we just have "v1".
+ avoid_commentary: Flag to indicate whether the model should avoid providing additional commentary
+ in responses. Commentary is conversational in nature and does not contain
+ verifiable claims; therefore, commentary is not strictly grounded in available
+ context. However, commentary may provide useful context which improves the
+ helpfulness of responses.
+
system_prompt: Instructions that the model follows when generating responses. Note that we do
not guarantee that the model follows these instructions exactly.
@@ -93,6 +101,7 @@ def create(
"knowledge": knowledge,
"messages": messages,
"model": model,
+ "avoid_commentary": avoid_commentary,
"system_prompt": system_prompt,
},
generate_create_params.GenerateCreateParams,
@@ -130,6 +139,7 @@ async def create(
knowledge: List[str],
messages: Iterable[generate_create_params.Message],
model: str,
+ avoid_commentary: bool | NotGiven = NOT_GIVEN,
system_prompt: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -143,7 +153,8 @@ async def create(
engineered specifically to prioritize faithfulness to in-context retrievals over
parametric knowledge to reduce hallucinations in Retrieval-Augmented Generation.
- The total request cannot exceed 6,100 tokens.
+ The total request cannot exceed 32,000 tokens. Email glm-feedback@contextual.ai
+ with any feedback or questions.
Args:
knowledge: The knowledge sources the model can use when generating a response.
@@ -153,6 +164,12 @@ async def create(
model: The version of the Contextual's GLM to use. Currently, we just have "v1".
+ avoid_commentary: Flag to indicate whether the model should avoid providing additional commentary
+ in responses. Commentary is conversational in nature and does not contain
+ verifiable claims; therefore, commentary is not strictly grounded in available
+ context. However, commentary may provide useful context which improves the
+ helpfulness of responses.
+
system_prompt: Instructions that the model follows when generating responses. Note that we do
not guarantee that the model follows these instructions exactly.
@@ -171,6 +188,7 @@ async def create(
"knowledge": knowledge,
"messages": messages,
"model": model,
+ "avoid_commentary": avoid_commentary,
"system_prompt": system_prompt,
},
generate_create_params.GenerateCreateParams,
diff --git a/src/contextual/resources/rerank.py b/src/contextual/resources/rerank.py
index 79b738c6..65149c9a 100644
--- a/src/contextual/resources/rerank.py
+++ b/src/contextual/resources/rerank.py
@@ -52,6 +52,8 @@ def create(
documents: List[str],
model: str,
query: str,
+ instruction: str | NotGiven = NOT_GIVEN,
+ metadata: List[str] | NotGiven = NOT_GIVEN,
top_n: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -64,7 +66,7 @@ def create(
Rank a list of documents according to their relevance to a query.
The total request cannot exceed 400,000 tokens. The combined length of any
- document and the query must not exceed 4,000 tokens.
+ document, instruction and the query must not exceed 4,000 tokens.
Args:
documents: The texts to be reranked according to their relevance to the query
@@ -73,6 +75,11 @@ def create(
query: The string against which documents will be ranked for relevance
+ instruction: The instruction to be used for the reranker
+
+ metadata: Metadata for documents being passed to the reranker. Must be the same length as
+ the documents list.
+
top_n: The number of top-ranked results to return
extra_headers: Send extra headers
@@ -90,6 +97,8 @@ def create(
"documents": documents,
"model": model,
"query": query,
+ "instruction": instruction,
+ "metadata": metadata,
"top_n": top_n,
},
rerank_create_params.RerankCreateParams,
@@ -127,6 +136,8 @@ async def create(
documents: List[str],
model: str,
query: str,
+ instruction: str | NotGiven = NOT_GIVEN,
+ metadata: List[str] | NotGiven = NOT_GIVEN,
top_n: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -139,7 +150,7 @@ async def create(
Rank a list of documents according to their relevance to a query.
The total request cannot exceed 400,000 tokens. The combined length of any
- document and the query must not exceed 4,000 tokens.
+ document, instruction and the query must not exceed 4,000 tokens.
Args:
documents: The texts to be reranked according to their relevance to the query
@@ -148,6 +159,11 @@ async def create(
query: The string against which documents will be ranked for relevance
+ instruction: The instruction to be used for the reranker
+
+ metadata: Metadata for documents being passed to the reranker. Must be the same length as
+ the documents list.
+
top_n: The number of top-ranked results to return
extra_headers: Send extra headers
@@ -165,6 +181,8 @@ async def create(
"documents": documents,
"model": model,
"query": query,
+ "instruction": instruction,
+ "metadata": metadata,
"top_n": top_n,
},
rerank_create_params.RerankCreateParams,
diff --git a/src/contextual/resources/users.py b/src/contextual/resources/users.py
new file mode 100644
index 00000000..edc2181c
--- /dev/null
+++ b/src/contextual/resources/users.py
@@ -0,0 +1,521 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Iterable
+from typing_extensions import Literal
+
+import httpx
+
+from ..types import user_list_params, user_invite_params, user_update_params, user_deactivate_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import (
+ maybe_transform,
+ async_maybe_transform,
+)
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.new_user_param import NewUserParam
+from ..types.list_users_response import ListUsersResponse
+from ..types.invite_users_response import InviteUsersResponse
+
+__all__ = ["UsersResource", "AsyncUsersResource"]
+
+
+class UsersResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> UsersResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers
+ """
+ return UsersResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> UsersResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response
+ """
+ return UsersResourceWithStreamingResponse(self)
+
+ def update(
+ self,
+ *,
+ email: str,
+ is_tenant_admin: bool | NotGiven = NOT_GIVEN,
+ per_agent_roles: Iterable[user_update_params.PerAgentRole] | NotGiven = NOT_GIVEN,
+ roles: List[Literal["AGENT_USER"]] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> object:
+ """
+ Modify a given `User`.
+
+ Fields not included in the request body will not be modified.
+
+ Args:
+ email: The email of the user
+
+ is_tenant_admin: Flag indicating if the user is a tenant admin
+
+ per_agent_roles: Per agent level roles for the user. If a user is granted any role under `roles`,
+ then the user has that role for all the agents. Only the roles that need to be
+ updated should be part of this.
+
+ roles: The user level roles of the user.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._put(
+ "/users",
+ body=maybe_transform(
+ {
+ "email": email,
+ "is_tenant_admin": is_tenant_admin,
+ "per_agent_roles": per_agent_roles,
+ "roles": roles,
+ },
+ user_update_params.UserUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=object,
+ )
+
+ def list(
+ self,
+ *,
+ cursor: str | NotGiven = NOT_GIVEN,
+ deactivated: bool | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ search: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ListUsersResponse:
+ """
+ Retrieve a list of `users`.
+
+ Args:
+ cursor: Cursor for the beginning of the current page
+
+ deactivated: When set to true, return deactivated users instead.
+
+ limit: Number of users to return
+
+ search: Query to filter users by email
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/users",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "cursor": cursor,
+ "deactivated": deactivated,
+ "limit": limit,
+ "search": search,
+ },
+ user_list_params.UserListParams,
+ ),
+ ),
+ cast_to=ListUsersResponse,
+ )
+
+ def deactivate(
+ self,
+ *,
+ email: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> object:
+ """
+ Delete a given `user`.
+
+ Args:
+ email: The email of the user
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._delete(
+ "/users",
+ body=maybe_transform({"email": email}, user_deactivate_params.UserDeactivateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=object,
+ )
+
+ def invite(
+ self,
+ *,
+ new_users: Iterable[NewUserParam],
+ tenant_short_name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> InviteUsersResponse:
+ """Invite users to the tenant.
+
+ This checks if the user is already in the tenant and
+ if not, creates the user. We will return a list of user emails that were
+ successfully created (including existing users).
+
+ Args:
+ new_users: List of new users to be invited
+
+ tenant_short_name: The short name of the tenant
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/users",
+ body=maybe_transform(
+ {
+ "new_users": new_users,
+ "tenant_short_name": tenant_short_name,
+ },
+ user_invite_params.UserInviteParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=InviteUsersResponse,
+ )
+
+
+class AsyncUsersResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncUsersResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncUsersResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncUsersResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/ContextualAI/contextual-client-python#with_streaming_response
+ """
+ return AsyncUsersResourceWithStreamingResponse(self)
+
+ async def update(
+ self,
+ *,
+ email: str,
+ is_tenant_admin: bool | NotGiven = NOT_GIVEN,
+ per_agent_roles: Iterable[user_update_params.PerAgentRole] | NotGiven = NOT_GIVEN,
+ roles: List[Literal["AGENT_USER"]] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> object:
+ """
+ Modify a given `User`.
+
+ Fields not included in the request body will not be modified.
+
+ Args:
+ email: The email of the user
+
+ is_tenant_admin: Flag indicating if the user is a tenant admin
+
+ per_agent_roles: Per agent level roles for the user. If a user is granted any role under `roles`,
+ then the user has that role for all the agents. Only the roles that need to be
+ updated should be part of this.
+
+ roles: The user level roles of the user.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._put(
+ "/users",
+ body=await async_maybe_transform(
+ {
+ "email": email,
+ "is_tenant_admin": is_tenant_admin,
+ "per_agent_roles": per_agent_roles,
+ "roles": roles,
+ },
+ user_update_params.UserUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=object,
+ )
+
+ async def list(
+ self,
+ *,
+ cursor: str | NotGiven = NOT_GIVEN,
+ deactivated: bool | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ search: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ListUsersResponse:
+ """
+ Retrieve a list of `users`.
+
+ Args:
+ cursor: Cursor for the beginning of the current page
+
+ deactivated: When set to true, return deactivated users instead.
+
+ limit: Number of users to return
+
+ search: Query to filter users by email
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/users",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "cursor": cursor,
+ "deactivated": deactivated,
+ "limit": limit,
+ "search": search,
+ },
+ user_list_params.UserListParams,
+ ),
+ ),
+ cast_to=ListUsersResponse,
+ )
+
+ async def deactivate(
+ self,
+ *,
+ email: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> object:
+ """
+ Delete a given `user`.
+
+ Args:
+ email: The email of the user
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._delete(
+ "/users",
+ body=await async_maybe_transform({"email": email}, user_deactivate_params.UserDeactivateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=object,
+ )
+
+ async def invite(
+ self,
+ *,
+ new_users: Iterable[NewUserParam],
+ tenant_short_name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> InviteUsersResponse:
+ """Invite users to the tenant.
+
+ This checks if the user is already in the tenant and
+ if not, creates the user. We will return a list of user emails that were
+ successfully created (including existing users).
+
+ Args:
+ new_users: List of new users to be invited
+
+ tenant_short_name: The short name of the tenant
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/users",
+ body=await async_maybe_transform(
+ {
+ "new_users": new_users,
+ "tenant_short_name": tenant_short_name,
+ },
+ user_invite_params.UserInviteParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=InviteUsersResponse,
+ )
+
+
+class UsersResourceWithRawResponse:
+ def __init__(self, users: UsersResource) -> None:
+ self._users = users
+
+ self.update = to_raw_response_wrapper(
+ users.update,
+ )
+ self.list = to_raw_response_wrapper(
+ users.list,
+ )
+ self.deactivate = to_raw_response_wrapper(
+ users.deactivate,
+ )
+ self.invite = to_raw_response_wrapper(
+ users.invite,
+ )
+
+
+class AsyncUsersResourceWithRawResponse:
+ def __init__(self, users: AsyncUsersResource) -> None:
+ self._users = users
+
+ self.update = async_to_raw_response_wrapper(
+ users.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ users.list,
+ )
+ self.deactivate = async_to_raw_response_wrapper(
+ users.deactivate,
+ )
+ self.invite = async_to_raw_response_wrapper(
+ users.invite,
+ )
+
+
+class UsersResourceWithStreamingResponse:
+ def __init__(self, users: UsersResource) -> None:
+ self._users = users
+
+ self.update = to_streamed_response_wrapper(
+ users.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ users.list,
+ )
+ self.deactivate = to_streamed_response_wrapper(
+ users.deactivate,
+ )
+ self.invite = to_streamed_response_wrapper(
+ users.invite,
+ )
+
+
+class AsyncUsersResourceWithStreamingResponse:
+ def __init__(self, users: AsyncUsersResource) -> None:
+ self._users = users
+
+ self.update = async_to_streamed_response_wrapper(
+ users.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ users.list,
+ )
+ self.deactivate = async_to_streamed_response_wrapper(
+ users.deactivate,
+ )
+ self.invite = async_to_streamed_response_wrapper(
+ users.invite,
+ )
diff --git a/src/contextual/types/__init__.py b/src/contextual/types/__init__.py
index e539092d..d5285a29 100644
--- a/src/contextual/types/__init__.py
+++ b/src/contextual/types/__init__.py
@@ -5,18 +5,25 @@
from .agent import Agent as Agent
from .datastore import Datastore as Datastore
from .agent_metadata import AgentMetadata as AgentMetadata
+from .new_user_param import NewUserParam as NewUserParam
+from .user_list_params import UserListParams as UserListParams
from .agent_list_params import AgentListParams as AgentListParams
from .datastore_metadata import DatastoreMetadata as DatastoreMetadata
+from .user_invite_params import UserInviteParams as UserInviteParams
+from .user_update_params import UserUpdateParams as UserUpdateParams
from .agent_create_params import AgentCreateParams as AgentCreateParams
from .agent_update_params import AgentUpdateParams as AgentUpdateParams
from .create_agent_output import CreateAgentOutput as CreateAgentOutput
+from .list_users_response import ListUsersResponse as ListUsersResponse
from .list_agents_response import ListAgentsResponse as ListAgentsResponse
from .lmunit_create_params import LMUnitCreateParams as LMUnitCreateParams
from .rerank_create_params import RerankCreateParams as RerankCreateParams
from .datastore_list_params import DatastoreListParams as DatastoreListParams
+from .invite_users_response import InviteUsersResponse as InviteUsersResponse
from .generate_create_params import GenerateCreateParams as GenerateCreateParams
from .lmunit_create_response import LMUnitCreateResponse as LMUnitCreateResponse
from .rerank_create_response import RerankCreateResponse as RerankCreateResponse
+from .user_deactivate_params import UserDeactivateParams as UserDeactivateParams
from .datastore_create_params import DatastoreCreateParams as DatastoreCreateParams
from .generate_create_response import GenerateCreateResponse as GenerateCreateResponse
from .list_datastores_response import ListDatastoresResponse as ListDatastoresResponse
diff --git a/src/contextual/types/agent_create_params.py b/src/contextual/types/agent_create_params.py
index 2e26b459..ec8e9207 100644
--- a/src/contextual/types/agent_create_params.py
+++ b/src/contextual/types/agent_create_params.py
@@ -5,18 +5,25 @@
from typing import List
from typing_extensions import Required, TypedDict
-__all__ = ["AgentCreateParams"]
+__all__ = [
+ "AgentCreateParams",
+ "AgentConfigs",
+ "AgentConfigsFilterAndRerankConfig",
+ "AgentConfigsGenerateResponseConfig",
+ "AgentConfigsGlobalConfig",
+ "AgentConfigsRetrievalConfig",
+]
class AgentCreateParams(TypedDict, total=False):
name: Required[str]
"""Name of the agent"""
- datastore_ids: List[str]
- """The IDs of the datastore associated with the agent.
+ agent_configs: AgentConfigs
+ """The following advanced parameters are experimental and subject to change."""
- Leave empty to automatically create a new datastore.
- """
+ datastore_ids: List[str]
+ """The IDs of the datastore to associate with this agent."""
description: str
"""Description of the agent"""
@@ -35,3 +42,73 @@ class AgentCreateParams(TypedDict, total=False):
Note that we do not guarantee that the system will follow these instructions
exactly.
"""
+
+
+class AgentConfigsFilterAndRerankConfig(TypedDict, total=False):
+ top_k_reranked_chunks: int
+ """The number of highest ranked chunks after reranking to be used"""
+
+
+class AgentConfigsGenerateResponseConfig(TypedDict, total=False):
+ frequency_penalty: float
+ """
+ This parameter adjusts how the model treats repeated tokens during text
+ generation.
+ """
+
+ max_new_tokens: int
+ """The maximum number of tokens the model can generate in a response."""
+
+ seed: int
+ """
+ This parameter controls the randomness of how the model selects the next tokens
+ during text generation.
+ """
+
+ temperature: float
+ """The sampling temperature, which affects the randomness in the response."""
+
+ top_p: float
+ """
+ A parameter for nucleus sampling, an alternative to `temperature` which also
+ affects the randomness of the response.
+ """
+
+
+class AgentConfigsGlobalConfig(TypedDict, total=False):
+ enable_filter: bool
+ """Enables filtering of retrieved chunks with a separate LLM"""
+
+ enable_multi_turn: bool
+ """Enables multi-turn conversations.
+
+ This feature is currently experimental and will be improved.
+ """
+
+ enable_rerank: bool
+ """Enables reranking of retrieved chunks"""
+
+
+class AgentConfigsRetrievalConfig(TypedDict, total=False):
+ lexical_alpha: float
+ """The weight of lexical search during retrieval"""
+
+ semantic_alpha: float
+ """The weight of semantic search during retrieval"""
+
+ top_k_retrieved_chunks: int
+ """The maximum number of retrieved chunks from the datastore."""
+
+
+class AgentConfigs(TypedDict, total=False):
+ filter_and_rerank_config: AgentConfigsFilterAndRerankConfig
+ """Parameters that affect filtering and reranking of retrieved knowledge"""
+
+ generate_response_config: AgentConfigsGenerateResponseConfig
+ """Parameters that affect response generation"""
+
+ global_config: AgentConfigsGlobalConfig
+ """Parameters that affect the agent's overall RAG workflow"""
+
+ retrieval_config: AgentConfigsRetrievalConfig
+ """Parameters that affect how the agent retrieves from datastore(s)"""
diff --git a/src/contextual/types/agent_metadata.py b/src/contextual/types/agent_metadata.py
index 5a5dee93..58901b56 100644
--- a/src/contextual/types/agent_metadata.py
+++ b/src/contextual/types/agent_metadata.py
@@ -4,7 +4,84 @@
from .._models import BaseModel
-__all__ = ["AgentMetadata"]
+__all__ = [
+ "AgentMetadata",
+ "AgentConfigs",
+ "AgentConfigsFilterAndRerankConfig",
+ "AgentConfigsGenerateResponseConfig",
+ "AgentConfigsGlobalConfig",
+ "AgentConfigsRetrievalConfig",
+]
+
+
+class AgentConfigsFilterAndRerankConfig(BaseModel):
+ top_k_reranked_chunks: Optional[int] = None
+ """The number of highest ranked chunks after reranking to be used"""
+
+
+class AgentConfigsGenerateResponseConfig(BaseModel):
+ frequency_penalty: Optional[float] = None
+ """
+ This parameter adjusts how the model treats repeated tokens during text
+ generation.
+ """
+
+ max_new_tokens: Optional[int] = None
+ """The maximum number of tokens the model can generate in a response."""
+
+ seed: Optional[int] = None
+ """
+ This parameter controls the randomness of how the model selects the next tokens
+ during text generation.
+ """
+
+ temperature: Optional[float] = None
+ """The sampling temperature, which affects the randomness in the response."""
+
+ top_p: Optional[float] = None
+ """
+ A parameter for nucleus sampling, an alternative to `temperature` which also
+ affects the randomness of the response.
+ """
+
+
+class AgentConfigsGlobalConfig(BaseModel):
+ enable_filter: Optional[bool] = None
+ """Enables filtering of retrieved chunks with a separate LLM"""
+
+ enable_multi_turn: Optional[bool] = None
+ """Enables multi-turn conversations.
+
+ This feature is currently experimental and will be improved.
+ """
+
+ enable_rerank: Optional[bool] = None
+ """Enables reranking of retrieved chunks"""
+
+
+class AgentConfigsRetrievalConfig(BaseModel):
+ lexical_alpha: Optional[float] = None
+ """The weight of lexical search during retrieval"""
+
+ semantic_alpha: Optional[float] = None
+ """The weight of semantic search during retrieval"""
+
+ top_k_retrieved_chunks: Optional[int] = None
+ """The maximum number of retrieved chunks from the datastore."""
+
+
+class AgentConfigs(BaseModel):
+ filter_and_rerank_config: Optional[AgentConfigsFilterAndRerankConfig] = None
+ """Parameters that affect filtering and reranking of retrieved knowledge"""
+
+ generate_response_config: Optional[AgentConfigsGenerateResponseConfig] = None
+ """Parameters that affect response generation"""
+
+ global_config: Optional[AgentConfigsGlobalConfig] = None
+ """Parameters that affect the agent's overall RAG workflow"""
+
+ retrieval_config: Optional[AgentConfigsRetrievalConfig] = None
+ """Parameters that affect how the agent retrieves from datastore(s)"""
class AgentMetadata(BaseModel):
@@ -14,6 +91,9 @@ class AgentMetadata(BaseModel):
name: str
"""Name of the agent"""
+ agent_configs: Optional[AgentConfigs] = None
+ """The following advanced parameters are experimental and subject to change."""
+
description: Optional[str] = None
"""Description of the agent"""
diff --git a/src/contextual/types/agent_update_params.py b/src/contextual/types/agent_update_params.py
index 58fd9f04..34f9ad63 100644
--- a/src/contextual/types/agent_update_params.py
+++ b/src/contextual/types/agent_update_params.py
@@ -5,10 +5,20 @@
from typing import List
from typing_extensions import TypedDict
-__all__ = ["AgentUpdateParams"]
+__all__ = [
+ "AgentUpdateParams",
+ "AgentConfigs",
+ "AgentConfigsFilterAndRerankConfig",
+ "AgentConfigsGenerateResponseConfig",
+ "AgentConfigsGlobalConfig",
+ "AgentConfigsRetrievalConfig",
+]
class AgentUpdateParams(TypedDict, total=False):
+ agent_configs: AgentConfigs
+ """The following advanced parameters are experimental and subject to change."""
+
datastore_ids: List[str]
"""IDs of the datastore to associate with the agent."""
@@ -34,3 +44,73 @@ class AgentUpdateParams(TypedDict, total=False):
Note that we do not guarantee that the system will follow these instructions
exactly.
"""
+
+
+class AgentConfigsFilterAndRerankConfig(TypedDict, total=False):
+ top_k_reranked_chunks: int
+ """The number of highest ranked chunks after reranking to be used"""
+
+
+class AgentConfigsGenerateResponseConfig(TypedDict, total=False):
+ frequency_penalty: float
+ """
+ This parameter adjusts how the model treats repeated tokens during text
+ generation.
+ """
+
+ max_new_tokens: int
+ """The maximum number of tokens the model can generate in a response."""
+
+ seed: int
+ """
+ This parameter controls the randomness of how the model selects the next tokens
+ during text generation.
+ """
+
+ temperature: float
+ """The sampling temperature, which affects the randomness in the response."""
+
+ top_p: float
+ """
+ A parameter for nucleus sampling, an alternative to `temperature` which also
+ affects the randomness of the response.
+ """
+
+
+class AgentConfigsGlobalConfig(TypedDict, total=False):
+ enable_filter: bool
+ """Enables filtering of retrieved chunks with a separate LLM"""
+
+ enable_multi_turn: bool
+ """Enables multi-turn conversations.
+
+ This feature is currently experimental and will be improved.
+ """
+
+ enable_rerank: bool
+ """Enables reranking of retrieved chunks"""
+
+
+class AgentConfigsRetrievalConfig(TypedDict, total=False):
+ lexical_alpha: float
+ """The weight of lexical search during retrieval"""
+
+ semantic_alpha: float
+ """The weight of semantic search during retrieval"""
+
+ top_k_retrieved_chunks: int
+ """The maximum number of retrieved chunks from the datastore."""
+
+
+class AgentConfigs(TypedDict, total=False):
+ filter_and_rerank_config: AgentConfigsFilterAndRerankConfig
+ """Parameters that affect filtering and reranking of retrieved knowledge"""
+
+ generate_response_config: AgentConfigsGenerateResponseConfig
+ """Parameters that affect response generation"""
+
+ global_config: AgentConfigsGlobalConfig
+ """Parameters that affect the agent's overall RAG workflow"""
+
+ retrieval_config: AgentConfigsRetrievalConfig
+ """Parameters that affect how the agent retrieves from datastore(s)"""
diff --git a/src/contextual/types/agents/datasets/__init__.py b/src/contextual/types/agents/datasets/__init__.py
index 3cfcda45..56eabefe 100644
--- a/src/contextual/types/agents/datasets/__init__.py
+++ b/src/contextual/types/agents/datasets/__init__.py
@@ -2,7 +2,12 @@
from __future__ import annotations
+from .tune_list_params import TuneListParams as TuneListParams
+from .tune_create_params import TuneCreateParams as TuneCreateParams
+from .tune_update_params import TuneUpdateParams as TuneUpdateParams
from .evaluate_list_params import EvaluateListParams as EvaluateListParams
+from .tune_metadata_params import TuneMetadataParams as TuneMetadataParams
+from .tune_retrieve_params import TuneRetrieveParams as TuneRetrieveParams
from .evaluate_create_params import EvaluateCreateParams as EvaluateCreateParams
from .evaluate_update_params import EvaluateUpdateParams as EvaluateUpdateParams
from .evaluate_metadata_params import EvaluateMetadataParams as EvaluateMetadataParams
diff --git a/src/contextual/types/agents/datasets/tune_create_params.py b/src/contextual/types/agents/datasets/tune_create_params.py
new file mode 100644
index 00000000..a2504a5c
--- /dev/null
+++ b/src/contextual/types/agents/datasets/tune_create_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from ...._types import FileTypes
+
+__all__ = ["TuneCreateParams"]
+
+
+class TuneCreateParams(TypedDict, total=False):
+ dataset_name: Required[str]
+ """Name of the tune dataset"""
+
+ dataset_type: Required[Literal["tuning_set"]]
+ """Type of tune dataset which determines its schema and validation rules."""
+
+ file: Required[FileTypes]
+ """JSONL or CSV file containing the tune dataset"""
diff --git a/src/contextual/types/agents/datasets/tune_list_params.py b/src/contextual/types/agents/datasets/tune_list_params.py
new file mode 100644
index 00000000..59702ad7
--- /dev/null
+++ b/src/contextual/types/agents/datasets/tune_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["TuneListParams"]
+
+
+class TuneListParams(TypedDict, total=False):
+ dataset_name: str
+ """Optional dataset name to filter the results by.
+
+ If provided, only versions from that dataset are listed.
+ """
diff --git a/src/contextual/types/agents/datasets/tune_metadata_params.py b/src/contextual/types/agents/datasets/tune_metadata_params.py
new file mode 100644
index 00000000..a9352635
--- /dev/null
+++ b/src/contextual/types/agents/datasets/tune_metadata_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["TuneMetadataParams"]
+
+
+class TuneMetadataParams(TypedDict, total=False):
+ agent_id: Required[str]
+ """Agent ID associated with the tune dataset"""
+
+ version: str
+ """Version number of the dataset. Defaults to the latest version if not specified."""
diff --git a/src/contextual/types/agents/datasets/tune_retrieve_params.py b/src/contextual/types/agents/datasets/tune_retrieve_params.py
new file mode 100644
index 00000000..2192e7e8
--- /dev/null
+++ b/src/contextual/types/agents/datasets/tune_retrieve_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["TuneRetrieveParams"]
+
+
+class TuneRetrieveParams(TypedDict, total=False):
+ agent_id: Required[str]
+ """Agent ID associated with the tune dataset"""
+
+ batch_size: int
+ """Batch size for processing"""
+
+ version: str
+ """Version number of the tune dataset to retrieve.
+
+ Defaults to the latest version if not specified.
+ """
diff --git a/src/contextual/types/agents/datasets/tune_update_params.py b/src/contextual/types/agents/datasets/tune_update_params.py
new file mode 100644
index 00000000..8e08aeac
--- /dev/null
+++ b/src/contextual/types/agents/datasets/tune_update_params.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from ...._types import FileTypes
+
+__all__ = ["TuneUpdateParams"]
+
+
+class TuneUpdateParams(TypedDict, total=False):
+ agent_id: Required[str]
+ """Agent ID associated with the tune dataset"""
+
+ dataset_type: Required[Literal["tuning_set"]]
+ """Type of tune dataset which determines its schema and validation rules.
+
+ Must match the `dataset_type` used at dataset creation time.
+ """
+
+ file: Required[FileTypes]
+ """JSONL or CSV file containing the entries to append to the tune dataset"""
diff --git a/src/contextual/types/agents/evaluate/evaluation_job_metadata.py b/src/contextual/types/agents/evaluate/evaluation_job_metadata.py
index 3c7b2ea5..9be981da 100644
--- a/src/contextual/types/agents/evaluate/evaluation_job_metadata.py
+++ b/src/contextual/types/agents/evaluate/evaluation_job_metadata.py
@@ -15,6 +15,9 @@ class JobMetadata(BaseModel):
num_predictions: Optional[int] = None
"""Total number of predictions made during the evaluation job"""
+ num_processed_predictions: Optional[int] = None
+ """Number of predictions that were processed during the evaluation job"""
+
num_successful_predictions: Optional[int] = None
"""Number of predictions that were successful during the evaluation job"""
diff --git a/src/contextual/types/agents/evaluate/list_evaluation_jobs_response.py b/src/contextual/types/agents/evaluate/list_evaluation_jobs_response.py
index 5e22201f..24038c29 100644
--- a/src/contextual/types/agents/evaluate/list_evaluation_jobs_response.py
+++ b/src/contextual/types/agents/evaluate/list_evaluation_jobs_response.py
@@ -37,6 +37,9 @@ class EvaluationRound(BaseModel):
processing_started_at: Optional[datetime] = None
"""Timestamp indicating when the evaluation round started processing"""
+ results_dataset_name: Optional[str] = None
+ """Name of the dataset with the evaluation results"""
+
summary_results: Optional[object] = None
"""Score of the evaluation round"""
diff --git a/src/contextual/types/agents/query_create_params.py b/src/contextual/types/agents/query_create_params.py
index 3cbe5c8b..73b177a3 100644
--- a/src/contextual/types/agents/query_create_params.py
+++ b/src/contextual/types/agents/query_create_params.py
@@ -17,12 +17,13 @@ class QueryCreateParams(TypedDict, total=False):
"""
include_retrieval_content_text: bool
- """Ignored if `retrievals_only` is True.
+ """Set to `true` to include the text of the retrieved contents in the response.
- Set to `true` to include the text of the retrieved contents in the response. If
- `false`, only metadata about the retrieved contents will be included, not
- content text. Content text and other metadata can also be fetched separately
- using the `/agents/{agent_id}/query/{message_id}/retrieval/info` endpoint.
+ If `false`, only metadata about the retrieved contents will be included, not
+ content text. This parameter is ignored if `retrievals_only` is `true`, in which
+ case `content_text` will always be returned. Content text and other metadata can
+ also be fetched separately using the
+ `/agents/{agent_id}/query/{message_id}/retrieval/info` endpoint.
"""
retrievals_only: bool
@@ -52,5 +53,5 @@ class Message(TypedDict, total=False):
content: Required[str]
"""Content of the message"""
- role: Required[Literal["user", "system", "assistant"]]
+ role: Required[Literal["user", "system", "assistant", "knowledge"]]
"""Role of the sender"""
diff --git a/src/contextual/types/agents/query_response.py b/src/contextual/types/agents/query_response.py
index 3444768e..56ae9e02 100644
--- a/src/contextual/types/agents/query_response.py
+++ b/src/contextual/types/agents/query_response.py
@@ -18,7 +18,7 @@ class RetrievalContent(BaseModel):
doc_name: str
"""Name of the document"""
- format: Literal["pdf", "html"]
+ format: Literal["pdf", "html", "htm"]
"""Format of the content, such as `pdf` or `html`"""
type: str
@@ -58,7 +58,7 @@ class Message(BaseModel):
content: str
"""Content of the message"""
- role: Literal["user", "system", "assistant"]
+ role: Literal["user", "system", "assistant", "knowledge"]
"""Role of the sender"""
diff --git a/src/contextual/types/agents/retrieval_info_response.py b/src/contextual/types/agents/retrieval_info_response.py
index ca9d8f86..6f7d96ab 100644
--- a/src/contextual/types/agents/retrieval_info_response.py
+++ b/src/contextual/types/agents/retrieval_info_response.py
@@ -17,6 +17,9 @@ class ContentMetadata(BaseModel):
height: float
"""Height of the image."""
+ page: int
+ """Page number of the content."""
+
page_img: str
"""Image of the page on which the content occurs."""
diff --git a/src/contextual/types/agents/tune/list_tune_jobs_response.py b/src/contextual/types/agents/tune/list_tune_jobs_response.py
index d1fa94a0..1248eb58 100644
--- a/src/contextual/types/agents/tune/list_tune_jobs_response.py
+++ b/src/contextual/types/agents/tune/list_tune_jobs_response.py
@@ -1,51 +1,16 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, List, Optional
+from typing import List
-from ...._compat import PYDANTIC_V2, ConfigDict
from ...._models import BaseModel
+from .tune_job_metadata import TuneJobMetadata
-__all__ = ["ListTuneJobsResponse", "Job"]
-
-
-class Job(BaseModel):
- id: str
- """ID of the tune job"""
-
- job_status: str
- """Status of the tune job.
-
- There are four possible statuses: 'failed', 'pending', 'processing' and
- 'completed'.
- """
-
- evaluation_results: Optional[Dict[str, float]] = None
- """
- Evaluation results of the tuned model, represented as an object mapping metric
- names (strings) to their scores (floats). Omitted if the tuning job failed or is
- still in progress.
- """
-
- model_id: Optional[str] = None
- """ID of the tuned model.
-
- Omitted if the tuning job failed or is still in progress.
- """
-
- if PYDANTIC_V2:
- # allow fields with a `model_` prefix
- model_config = ConfigDict(protected_namespaces=tuple())
+__all__ = ["ListTuneJobsResponse"]
class ListTuneJobsResponse(BaseModel):
- jobs: List[Job]
- """List of tune jobs"""
-
- next_cursor: Optional[str] = None
- """Next cursor to continue pagination.
-
- Omitted if there are no more specialization jobs.
- """
+ jobs: List[TuneJobMetadata]
+ """List of fine-tuning jobs for the agent"""
- total_count: Optional[int] = None
- """Total number of available specialization jobs"""
+ total_count: int
+ """Total number of jobs associated with the agent"""
diff --git a/src/contextual/types/agents/tune/list_tune_models_response.py b/src/contextual/types/agents/tune/list_tune_models_response.py
index 963655b4..ffc51362 100644
--- a/src/contextual/types/agents/tune/list_tune_models_response.py
+++ b/src/contextual/types/agents/tune/list_tune_models_response.py
@@ -2,6 +2,7 @@
from typing import List
from datetime import datetime
+from typing_extensions import Literal
from ...._compat import PYDANTIC_V2, ConfigDict
from ...._models import BaseModel
@@ -10,6 +11,9 @@
class Model(BaseModel):
+ application_id: str
+ """ID of the associated agent"""
+
created_at: datetime
"""Timestamp indicating when the model was created"""
@@ -19,6 +23,9 @@ class Model(BaseModel):
model_id: str
"""ID of the registered model"""
+ state: Literal["active", "inactive", "pending"]
+ """State of the model"""
+
if PYDANTIC_V2:
# allow fields with a `model_` prefix
model_config = ConfigDict(protected_namespaces=tuple())
@@ -26,7 +33,7 @@ class Model(BaseModel):
class ListTuneModelsResponse(BaseModel):
models: List[Model]
- """List of registered models for the application"""
+ """List of registered models for the agent"""
- total: int
- """Total number of models associated with the application"""
+ total_count: int
+ """Total number of models associated with the agent"""
diff --git a/src/contextual/types/agents/tune/tune_job_metadata.py b/src/contextual/types/agents/tune/tune_job_metadata.py
index a64be7c0..e71d9307 100644
--- a/src/contextual/types/agents/tune/tune_job_metadata.py
+++ b/src/contextual/types/agents/tune/tune_job_metadata.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, Optional
+from typing import Optional
from ...._compat import PYDANTIC_V2, ConfigDict
from ...._models import BaseModel
@@ -9,22 +9,17 @@
class TuneJobMetadata(BaseModel):
- job_status: str
- """Status of the tune job.
+ id: str
+ """ID of the tune job"""
- There are four possible statuses: 'failed', 'pending', 'processing',
- 'completed'.
- """
+ job_status: str
+ """Status of the tune job"""
- evaluation_results: Optional[Dict[str, float]] = None
- """
- Evaluation results of the tuned model, represented as a dictionary mapping
- metric names (strings) to their scores (floats). Omitted if the tuning job
- failed or is still in progress.
- """
+ evaluation_metadata: Optional[object] = None
+ """Metadata about the model evaluation, including status and results if completed."""
model_id: Optional[str] = None
- """ID of the trained model.
+ """ID of the tuned model.
Omitted if the tuning job failed or is still in progress.
"""
diff --git a/src/contextual/types/agents/tune_create_params.py b/src/contextual/types/agents/tune_create_params.py
index 7d90470e..ac1e1cdc 100644
--- a/src/contextual/types/agents/tune_create_params.py
+++ b/src/contextual/types/agents/tune_create_params.py
@@ -2,7 +2,8 @@
from __future__ import annotations
-from typing_extensions import Required, TypedDict
+from typing import Optional
+from typing_extensions import TypedDict
from ..._types import FileTypes
@@ -10,7 +11,27 @@
class TuneCreateParams(TypedDict, total=False):
- training_file: Required[FileTypes]
+ test_dataset_name: Optional[str]
+ """Optional.
+
+ `Dataset` to use for testing model checkpoints, created through the
+ `/datasets/evaluate` API.
+ """
+
+ test_file: Optional[FileTypes]
+ """Optional.
+
+ Local path to the test data file. The test file should follow the same format as
+ the training data file.
+ """
+
+ train_dataset_name: Optional[str]
+ """`Dataset` to use for training, created through the `/datasets/tune` API.
+
+ Either `train_dataset_name` or `training_file` must be provided, but not both.
+ """
+
+ training_file: Optional[FileTypes]
"""Local path to the training data file.
The file should be in JSON array format, where each element of the array is a
@@ -22,7 +43,9 @@ class TuneCreateParams(TypedDict, total=False):
- `reference` (`str`): The gold-standard answer to the prompt.
- - `guideline` (`str`): Guidelines for model output.
+ - `guideline` (`str`): Guidelines for model output. If you do not have special
+ guidelines for the model's output, you can use the `System Prompt` defined in
+ your Agent configuration as the `guideline`.
- `prompt` (`str`): Question for the model to respond to.
@@ -44,16 +67,3 @@ class TuneCreateParams(TypedDict, total=False):
]
```
"""
-
- model_id: str
- """ID of an existing model to tune.
-
- Defaults to the agent's default model if not specified.
- """
-
- test_file: FileTypes
- """Optional.
-
- Local path to the test data file. The test file should follow the same format as
- the training data file.
- """
diff --git a/src/contextual/types/datastores/__init__.py b/src/contextual/types/datastores/__init__.py
index 481b7fd9..030145c4 100644
--- a/src/contextual/types/datastores/__init__.py
+++ b/src/contextual/types/datastores/__init__.py
@@ -7,3 +7,4 @@
from .document_list_params import DocumentListParams as DocumentListParams
from .document_ingest_params import DocumentIngestParams as DocumentIngestParams
from .list_documents_response import ListDocumentsResponse as ListDocumentsResponse
+from .document_set_metadata_params import DocumentSetMetadataParams as DocumentSetMetadataParams
diff --git a/src/contextual/types/datastores/document_ingest_params.py b/src/contextual/types/datastores/document_ingest_params.py
index 73481696..295c0dc4 100644
--- a/src/contextual/types/datastores/document_ingest_params.py
+++ b/src/contextual/types/datastores/document_ingest_params.py
@@ -12,3 +12,13 @@
class DocumentIngestParams(TypedDict, total=False):
file: Required[FileTypes]
"""File to ingest"""
+
+ metadata: str
+ """Metadata in `JSON` format.
+
+ Metadata should be passed in a nested dictionary structure of `str` metadata
+ type to `Dict` mapping `str` metadata keys to `str`, `bool`, `float` or `int`
+ values. Currently, `custom_metadata` is the only supported metadata type. Example
+ `metadata` dictionary: {"metadata": {"custom_metadata": {"customKey1": "value3",
+ "\\__filterKey": "filterValue3"}}}
+ """
diff --git a/src/contextual/types/datastores/document_metadata.py b/src/contextual/types/datastores/document_metadata.py
index 53c41f95..6d8dbdac 100644
--- a/src/contextual/types/datastores/document_metadata.py
+++ b/src/contextual/types/datastores/document_metadata.py
@@ -1,5 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from typing import Dict, Union, Optional
+from typing_extensions import Literal
from ..._models import BaseModel
@@ -10,8 +12,16 @@ class DocumentMetadata(BaseModel):
id: str
"""ID of the document that was ingested"""
+ created_at: str
+ """Timestamp of when the document was created in ISO format."""
+
name: str
"""User specified name of the document"""
- status: str
+ status: Literal["pending", "processing", "retrying", "completed", "failed", "cancelled"]
"""Status of this document's ingestion job"""
+
+ custom_metadata: Optional[Dict[str, Union[bool, float, str]]] = None
+
+ updated_at: Optional[str] = None
+ """Timestamp of when the document was modified in ISO format."""
diff --git a/src/contextual/types/datastores/document_set_metadata_params.py b/src/contextual/types/datastores/document_set_metadata_params.py
new file mode 100644
index 00000000..f92d6b62
--- /dev/null
+++ b/src/contextual/types/datastores/document_set_metadata_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union
+from typing_extensions import Required, TypedDict
+
+__all__ = ["DocumentSetMetadataParams"]
+
+
+class DocumentSetMetadataParams(TypedDict, total=False):
+ datastore_id: Required[str]
+ """Datastore ID of the datastore containing the document"""
+
+ custom_metadata: Dict[str, Union[bool, float, str]]
diff --git a/src/contextual/types/generate_create_params.py b/src/contextual/types/generate_create_params.py
index ccb6e0d6..c0cd4658 100644
--- a/src/contextual/types/generate_create_params.py
+++ b/src/contextual/types/generate_create_params.py
@@ -21,6 +21,15 @@ class GenerateCreateParams(TypedDict, total=False):
model: Required[str]
"""The version of the Contextual's GLM to use. Currently, we just have "v1"."""
+ avoid_commentary: bool
+ """
+ Flag to indicate whether the model should avoid providing additional commentary
+ in responses. Commentary is conversational in nature and does not contain
+ verifiable claims; therefore, commentary is not strictly grounded in available
+ context. However, commentary may provide useful context which improves the
+ helpfulness of responses.
+ """
+
system_prompt: str
"""Instructions that the model follows when generating responses.
@@ -32,5 +41,5 @@ class Message(TypedDict, total=False):
content: Required[str]
"""Content of the message"""
- role: Required[Literal["user", "system", "assistant"]]
+ role: Required[Literal["user", "system", "assistant", "knowledge"]]
"""Role of the sender"""
diff --git a/src/contextual/types/invite_users_response.py b/src/contextual/types/invite_users_response.py
new file mode 100644
index 00000000..7414f8fe
--- /dev/null
+++ b/src/contextual/types/invite_users_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List
+
+from .._models import BaseModel
+
+__all__ = ["InviteUsersResponse"]
+
+
+class InviteUsersResponse(BaseModel):
+ error_details: Dict[str, str]
+ """
+ Details of the errors occurred while inviting users, where keys are the emails
+ and values are the error messages
+ """
+
+ invited_user_emails: List[str]
+ """List of emails of the invited users"""
diff --git a/src/contextual/types/list_users_response.py b/src/contextual/types/list_users_response.py
new file mode 100644
index 00000000..fbe10474
--- /dev/null
+++ b/src/contextual/types/list_users_response.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ListUsersResponse", "User", "UserPerAgentRole"]
+
+
+class UserPerAgentRole(BaseModel):
+ agent_id: str
+ """ID of the agent on which to grant/revoke the role."""
+
+ grant: bool
+ """When set to true, the roles will be granted o/w revoked."""
+
+ roles: List[Literal["AGENT_USER"]]
+ """The roles that are granted/revoked"""
+
+
+class User(BaseModel):
+ id: str
+
+ email: str
+ """The email of the user"""
+
+ is_tenant_admin: Optional[bool] = None
+ """Flag indicating if the user is a tenant admin"""
+
+ per_agent_roles: Optional[List[UserPerAgentRole]] = None
+ """Per agent level roles for the user.
+
+ If a user is granted any role under `roles`, then the user has that role for all
+ the agents. Only the roles that need to be updated should be part of this.
+ """
+
+ roles: Optional[List[Literal["AGENT_USER"]]] = None
+ """The user level roles of the user."""
+
+
+class ListUsersResponse(BaseModel):
+ users: List[User]
+ """List of users"""
+
+ next_cursor: Optional[str] = None
+ """Cursor for the beginning of the next page"""
diff --git a/src/contextual/types/new_user_param.py b/src/contextual/types/new_user_param.py
new file mode 100644
index 00000000..dd25792c
--- /dev/null
+++ b/src/contextual/types/new_user_param.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["NewUserParam", "PerAgentRole"]
+
+
+class PerAgentRole(TypedDict, total=False):
+ agent_id: Required[str]
+ """ID of the agent on which to grant/revoke the role."""
+
+ grant: Required[bool]
+ """When set to true, the roles will be granted o/w revoked."""
+
+ roles: Required[List[Literal["AGENT_USER"]]]
+ """The roles that are granted/revoked"""
+
+
+class NewUserParam(TypedDict, total=False):
+ email: Required[str]
+ """The email of the user"""
+
+ is_tenant_admin: bool
+ """Flag indicating if the user is a tenant admin"""
+
+ per_agent_roles: Iterable[PerAgentRole]
+ """Per agent level roles for the user.
+
+ If a user is granted any role under `roles`, then the user has that role for all
+ the agents. Only the roles that need to be updated should be part of this.
+ """
+
+ roles: List[Literal["AGENT_USER"]]
+ """The user level roles of the user."""
diff --git a/src/contextual/types/rerank_create_params.py b/src/contextual/types/rerank_create_params.py
index 06ace6cf..5b4a9160 100644
--- a/src/contextual/types/rerank_create_params.py
+++ b/src/contextual/types/rerank_create_params.py
@@ -18,5 +18,14 @@ class RerankCreateParams(TypedDict, total=False):
query: Required[str]
"""The string against which documents will be ranked for relevance"""
+ instruction: str
+ """The instruction to be used for the reranker"""
+
+ metadata: List[str]
+ """Metadata for documents being passed to the reranker.
+
+ Must be the same length as the documents list.
+ """
+
top_n: int
"""The number of top-ranked results to return"""
diff --git a/src/contextual/types/user_deactivate_params.py b/src/contextual/types/user_deactivate_params.py
new file mode 100644
index 00000000..b7ace943
--- /dev/null
+++ b/src/contextual/types/user_deactivate_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["UserDeactivateParams"]
+
+
+class UserDeactivateParams(TypedDict, total=False):
+ email: Required[str]
+ """The email of the user"""
diff --git a/src/contextual/types/user_invite_params.py b/src/contextual/types/user_invite_params.py
new file mode 100644
index 00000000..8301593f
--- /dev/null
+++ b/src/contextual/types/user_invite_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Required, TypedDict
+
+from .new_user_param import NewUserParam
+
+__all__ = ["UserInviteParams"]
+
+
+class UserInviteParams(TypedDict, total=False):
+ new_users: Required[Iterable[NewUserParam]]
+ """List of new users to be invited"""
+
+ tenant_short_name: Required[str]
+ """The short name of the tenant"""
diff --git a/src/contextual/types/user_list_params.py b/src/contextual/types/user_list_params.py
new file mode 100644
index 00000000..c453d1c9
--- /dev/null
+++ b/src/contextual/types/user_list_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["UserListParams"]
+
+
+class UserListParams(TypedDict, total=False):
+ cursor: str
+ """Cursor for the beginning of the current page"""
+
+ deactivated: bool
+ """When set to true, return deactivated users instead."""
+
+ limit: int
+ """Number of users to return"""
+
+ search: str
+ """Query to filter users by email"""
diff --git a/src/contextual/types/user_update_params.py b/src/contextual/types/user_update_params.py
new file mode 100644
index 00000000..f41a6adf
--- /dev/null
+++ b/src/contextual/types/user_update_params.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["UserUpdateParams", "PerAgentRole"]
+
+
+class UserUpdateParams(TypedDict, total=False):
+ email: Required[str]
+ """The email of the user"""
+
+ is_tenant_admin: bool
+ """Flag indicating if the user is a tenant admin"""
+
+ per_agent_roles: Iterable[PerAgentRole]
+ """Per agent level roles for the user.
+
+ If a user is granted any role under `roles`, then the user has that role for all
+ the agents. Only the roles that need to be updated should be part of this.
+ """
+
+ roles: List[Literal["AGENT_USER"]]
+ """The user level roles of the user."""
+
+
+class PerAgentRole(TypedDict, total=False):
+ agent_id: Required[str]
+ """ID of the agent on which to grant/revoke the role."""
+
+ grant: Required[bool]
+ """When set to true, the roles will be granted o/w revoked."""
+
+ roles: Required[List[Literal["AGENT_USER"]]]
+ """The roles that are granted/revoked"""
diff --git a/tests/api_resources/agents/datasets/test_tune.py b/tests/api_resources/agents/datasets/test_tune.py
new file mode 100644
index 00000000..704dac46
--- /dev/null
+++ b/tests/api_resources/agents/datasets/test_tune.py
@@ -0,0 +1,716 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import httpx
+import pytest
+from respx import MockRouter
+
+from contextual import ContextualAI, AsyncContextualAI
+from tests.utils import assert_matches_type
+from contextual._response import (
+ BinaryAPIResponse,
+ AsyncBinaryAPIResponse,
+ StreamedBinaryAPIResponse,
+ AsyncStreamedBinaryAPIResponse,
+)
+from contextual.types.agents import DatasetMetadata, ListDatasetsResponse, CreateDatasetResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestTune:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: ContextualAI) -> None:
+ tune = client.agents.datasets.tune.create(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_name="dataset_name",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: ContextualAI) -> None:
+ response = client.agents.datasets.tune.with_raw_response.create(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_name="dataset_name",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tune = response.parse()
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: ContextualAI) -> None:
+ with client.agents.datasets.tune.with_streaming_response.create(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_name="dataset_name",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tune = response.parse()
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: ContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ client.agents.datasets.tune.with_raw_response.create(
+ agent_id="",
+ dataset_name="dataset_name",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ def test_method_retrieve(self, client: ContextualAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+ tune = client.agents.datasets.tune.retrieve(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert tune.is_closed
+ assert tune.json() == {"foo": "bar"}
+ assert cast(Any, tune.is_closed) is True
+ assert isinstance(tune, BinaryAPIResponse)
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ def test_method_retrieve_with_all_params(self, client: ContextualAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+ tune = client.agents.datasets.tune.retrieve(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ batch_size=1,
+ version="version",
+ )
+ assert tune.is_closed
+ assert tune.json() == {"foo": "bar"}
+ assert cast(Any, tune.is_closed) is True
+ assert isinstance(tune, BinaryAPIResponse)
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ def test_raw_response_retrieve(self, client: ContextualAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+
+ tune = client.agents.datasets.tune.with_raw_response.retrieve(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert tune.is_closed is True
+ assert tune.http_request.headers.get("X-Stainless-Lang") == "python"
+ assert tune.json() == {"foo": "bar"}
+ assert isinstance(tune, BinaryAPIResponse)
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ def test_streaming_response_retrieve(self, client: ContextualAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+ with client.agents.datasets.tune.with_streaming_response.retrieve(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as tune:
+ assert not tune.is_closed
+ assert tune.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ assert tune.json() == {"foo": "bar"}
+ assert cast(Any, tune.is_closed) is True
+ assert isinstance(tune, StreamedBinaryAPIResponse)
+
+ assert cast(Any, tune.is_closed) is True
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ def test_path_params_retrieve(self, client: ContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ client.agents.datasets.tune.with_raw_response.retrieve(
+ dataset_name="dataset_name",
+ agent_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
+ client.agents.datasets.tune.with_raw_response.retrieve(
+ dataset_name="",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ @parametrize
+ def test_method_update(self, client: ContextualAI) -> None:
+ tune = client.agents.datasets.tune.update(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ @parametrize
+ def test_raw_response_update(self, client: ContextualAI) -> None:
+ response = client.agents.datasets.tune.with_raw_response.update(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tune = response.parse()
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ @parametrize
+ def test_streaming_response_update(self, client: ContextualAI) -> None:
+ with client.agents.datasets.tune.with_streaming_response.update(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tune = response.parse()
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_update(self, client: ContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ client.agents.datasets.tune.with_raw_response.update(
+ dataset_name="dataset_name",
+ agent_id="",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
+ client.agents.datasets.tune.with_raw_response.update(
+ dataset_name="",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+
+ @parametrize
+ def test_method_list(self, client: ContextualAI) -> None:
+ tune = client.agents.datasets.tune.list(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(ListDatasetsResponse, tune, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: ContextualAI) -> None:
+ tune = client.agents.datasets.tune.list(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_name="dataset_name",
+ )
+ assert_matches_type(ListDatasetsResponse, tune, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: ContextualAI) -> None:
+ response = client.agents.datasets.tune.with_raw_response.list(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tune = response.parse()
+ assert_matches_type(ListDatasetsResponse, tune, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: ContextualAI) -> None:
+ with client.agents.datasets.tune.with_streaming_response.list(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tune = response.parse()
+ assert_matches_type(ListDatasetsResponse, tune, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: ContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ client.agents.datasets.tune.with_raw_response.list(
+ agent_id="",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: ContextualAI) -> None:
+ tune = client.agents.datasets.tune.delete(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(object, tune, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: ContextualAI) -> None:
+ response = client.agents.datasets.tune.with_raw_response.delete(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tune = response.parse()
+ assert_matches_type(object, tune, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: ContextualAI) -> None:
+ with client.agents.datasets.tune.with_streaming_response.delete(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tune = response.parse()
+ assert_matches_type(object, tune, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: ContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ client.agents.datasets.tune.with_raw_response.delete(
+ dataset_name="dataset_name",
+ agent_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
+ client.agents.datasets.tune.with_raw_response.delete(
+ dataset_name="",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ @parametrize
+ def test_method_metadata(self, client: ContextualAI) -> None:
+ tune = client.agents.datasets.tune.metadata(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(DatasetMetadata, tune, path=["response"])
+
+ @parametrize
+ def test_method_metadata_with_all_params(self, client: ContextualAI) -> None:
+ tune = client.agents.datasets.tune.metadata(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ version="version",
+ )
+ assert_matches_type(DatasetMetadata, tune, path=["response"])
+
+ @parametrize
+ def test_raw_response_metadata(self, client: ContextualAI) -> None:
+ response = client.agents.datasets.tune.with_raw_response.metadata(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tune = response.parse()
+ assert_matches_type(DatasetMetadata, tune, path=["response"])
+
+ @parametrize
+ def test_streaming_response_metadata(self, client: ContextualAI) -> None:
+ with client.agents.datasets.tune.with_streaming_response.metadata(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tune = response.parse()
+ assert_matches_type(DatasetMetadata, tune, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_metadata(self, client: ContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ client.agents.datasets.tune.with_raw_response.metadata(
+ dataset_name="dataset_name",
+ agent_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
+ client.agents.datasets.tune.with_raw_response.metadata(
+ dataset_name="",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+
+class TestAsyncTune:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncContextualAI) -> None:
+ tune = await async_client.agents.datasets.tune.create(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_name="dataset_name",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.agents.datasets.tune.with_raw_response.create(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_name="dataset_name",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tune = await response.parse()
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.agents.datasets.tune.with_streaming_response.create(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_name="dataset_name",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tune = await response.parse()
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ await async_client.agents.datasets.tune.with_raw_response.create(
+ agent_id="",
+ dataset_name="dataset_name",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ async def test_method_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+ tune = await async_client.agents.datasets.tune.retrieve(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert tune.is_closed
+ assert await tune.json() == {"foo": "bar"}
+ assert cast(Any, tune.is_closed) is True
+ assert isinstance(tune, AsyncBinaryAPIResponse)
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ async def test_method_retrieve_with_all_params(
+ self, async_client: AsyncContextualAI, respx_mock: MockRouter
+ ) -> None:
+ respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+ tune = await async_client.agents.datasets.tune.retrieve(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ batch_size=1,
+ version="version",
+ )
+ assert tune.is_closed
+ assert await tune.json() == {"foo": "bar"}
+ assert cast(Any, tune.is_closed) is True
+ assert isinstance(tune, AsyncBinaryAPIResponse)
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ async def test_raw_response_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+
+ tune = await async_client.agents.datasets.tune.with_raw_response.retrieve(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert tune.is_closed is True
+ assert tune.http_request.headers.get("X-Stainless-Lang") == "python"
+ assert await tune.json() == {"foo": "bar"}
+ assert isinstance(tune, AsyncBinaryAPIResponse)
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ async def test_streaming_response_retrieve(self, async_client: AsyncContextualAI, respx_mock: MockRouter) -> None:
+ respx_mock.get("/agents/182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e/datasets/tune/dataset_name").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
+ async with async_client.agents.datasets.tune.with_streaming_response.retrieve(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as tune:
+ assert not tune.is_closed
+ assert tune.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ assert await tune.json() == {"foo": "bar"}
+ assert cast(Any, tune.is_closed) is True
+ assert isinstance(tune, AsyncStreamedBinaryAPIResponse)
+
+ assert cast(Any, tune.is_closed) is True
+
+ @parametrize
+ @pytest.mark.respx(base_url=base_url)
+ async def test_path_params_retrieve(self, async_client: AsyncContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ await async_client.agents.datasets.tune.with_raw_response.retrieve(
+ dataset_name="dataset_name",
+ agent_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
+ await async_client.agents.datasets.tune.with_raw_response.retrieve(
+ dataset_name="",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ @parametrize
+ async def test_method_update(self, async_client: AsyncContextualAI) -> None:
+ tune = await async_client.agents.datasets.tune.update(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.agents.datasets.tune.with_raw_response.update(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tune = await response.parse()
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.agents.datasets.tune.with_streaming_response.update(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tune = await response.parse()
+ assert_matches_type(CreateDatasetResponse, tune, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ await async_client.agents.datasets.tune.with_raw_response.update(
+ dataset_name="dataset_name",
+ agent_id="",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
+ await async_client.agents.datasets.tune.with_raw_response.update(
+ dataset_name="",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_type="tuning_set",
+ file=b"raw file contents",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncContextualAI) -> None:
+ tune = await async_client.agents.datasets.tune.list(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(ListDatasetsResponse, tune, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncContextualAI) -> None:
+ tune = await async_client.agents.datasets.tune.list(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ dataset_name="dataset_name",
+ )
+ assert_matches_type(ListDatasetsResponse, tune, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.agents.datasets.tune.with_raw_response.list(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tune = await response.parse()
+ assert_matches_type(ListDatasetsResponse, tune, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.agents.datasets.tune.with_streaming_response.list(
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tune = await response.parse()
+ assert_matches_type(ListDatasetsResponse, tune, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ await async_client.agents.datasets.tune.with_raw_response.list(
+ agent_id="",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncContextualAI) -> None:
+ tune = await async_client.agents.datasets.tune.delete(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(object, tune, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.agents.datasets.tune.with_raw_response.delete(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tune = await response.parse()
+ assert_matches_type(object, tune, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.agents.datasets.tune.with_streaming_response.delete(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tune = await response.parse()
+ assert_matches_type(object, tune, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ await async_client.agents.datasets.tune.with_raw_response.delete(
+ dataset_name="dataset_name",
+ agent_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
+ await async_client.agents.datasets.tune.with_raw_response.delete(
+ dataset_name="",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ @parametrize
+ async def test_method_metadata(self, async_client: AsyncContextualAI) -> None:
+ tune = await async_client.agents.datasets.tune.metadata(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(DatasetMetadata, tune, path=["response"])
+
+ @parametrize
+ async def test_method_metadata_with_all_params(self, async_client: AsyncContextualAI) -> None:
+ tune = await async_client.agents.datasets.tune.metadata(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ version="version",
+ )
+ assert_matches_type(DatasetMetadata, tune, path=["response"])
+
+ @parametrize
+ async def test_raw_response_metadata(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.agents.datasets.tune.with_raw_response.metadata(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tune = await response.parse()
+ assert_matches_type(DatasetMetadata, tune, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_metadata(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.agents.datasets.tune.with_streaming_response.metadata(
+ dataset_name="dataset_name",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tune = await response.parse()
+ assert_matches_type(DatasetMetadata, tune, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_metadata(self, async_client: AsyncContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ await async_client.agents.datasets.tune.with_raw_response.metadata(
+ dataset_name="dataset_name",
+ agent_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_name` but received ''"):
+ await async_client.agents.datasets.tune.with_raw_response.metadata(
+ dataset_name="",
+ agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
diff --git a/tests/api_resources/agents/test_tune.py b/tests/api_resources/agents/test_tune.py
index 6f3bb451..18871a25 100644
--- a/tests/api_resources/agents/test_tune.py
+++ b/tests/api_resources/agents/test_tune.py
@@ -21,7 +21,6 @@ class TestTune:
def test_method_create(self, client: ContextualAI) -> None:
tune = client.agents.tune.create(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- training_file=b"raw file contents",
)
assert_matches_type(CreateTuneResponse, tune, path=["response"])
@@ -29,9 +28,10 @@ def test_method_create(self, client: ContextualAI) -> None:
def test_method_create_with_all_params(self, client: ContextualAI) -> None:
tune = client.agents.tune.create(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- training_file=b"raw file contents",
- model_id="model_id",
+ test_dataset_name="test_dataset_name",
test_file=b"raw file contents",
+ train_dataset_name="train_dataset_name",
+ training_file=b"raw file contents",
)
assert_matches_type(CreateTuneResponse, tune, path=["response"])
@@ -39,7 +39,6 @@ def test_method_create_with_all_params(self, client: ContextualAI) -> None:
def test_raw_response_create(self, client: ContextualAI) -> None:
response = client.agents.tune.with_raw_response.create(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- training_file=b"raw file contents",
)
assert response.is_closed is True
@@ -51,7 +50,6 @@ def test_raw_response_create(self, client: ContextualAI) -> None:
def test_streaming_response_create(self, client: ContextualAI) -> None:
with client.agents.tune.with_streaming_response.create(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- training_file=b"raw file contents",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -66,7 +64,6 @@ def test_path_params_create(self, client: ContextualAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
client.agents.tune.with_raw_response.create(
agent_id="",
- training_file=b"raw file contents",
)
@@ -77,7 +74,6 @@ class TestAsyncTune:
async def test_method_create(self, async_client: AsyncContextualAI) -> None:
tune = await async_client.agents.tune.create(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- training_file=b"raw file contents",
)
assert_matches_type(CreateTuneResponse, tune, path=["response"])
@@ -85,9 +81,10 @@ async def test_method_create(self, async_client: AsyncContextualAI) -> None:
async def test_method_create_with_all_params(self, async_client: AsyncContextualAI) -> None:
tune = await async_client.agents.tune.create(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- training_file=b"raw file contents",
- model_id="model_id",
+ test_dataset_name="test_dataset_name",
test_file=b"raw file contents",
+ train_dataset_name="train_dataset_name",
+ training_file=b"raw file contents",
)
assert_matches_type(CreateTuneResponse, tune, path=["response"])
@@ -95,7 +92,6 @@ async def test_method_create_with_all_params(self, async_client: AsyncContextual
async def test_raw_response_create(self, async_client: AsyncContextualAI) -> None:
response = await async_client.agents.tune.with_raw_response.create(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- training_file=b"raw file contents",
)
assert response.is_closed is True
@@ -107,7 +103,6 @@ async def test_raw_response_create(self, async_client: AsyncContextualAI) -> Non
async def test_streaming_response_create(self, async_client: AsyncContextualAI) -> None:
async with async_client.agents.tune.with_streaming_response.create(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- training_file=b"raw file contents",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -122,5 +117,4 @@ async def test_path_params_create(self, async_client: AsyncContextualAI) -> None
with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
await async_client.agents.tune.with_raw_response.create(
agent_id="",
- training_file=b"raw file contents",
)
diff --git a/tests/api_resources/datastores/test_documents.py b/tests/api_resources/datastores/test_documents.py
index b8a179b3..e1af38ea 100644
--- a/tests/api_resources/datastores/test_documents.py
+++ b/tests/api_resources/datastores/test_documents.py
@@ -128,6 +128,15 @@ def test_method_ingest(self, client: ContextualAI) -> None:
)
assert_matches_type(IngestionResponse, document, path=["response"])
+ @parametrize
+ def test_method_ingest_with_all_params(self, client: ContextualAI) -> None:
+ document = client.datastores.documents.ingest(
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ file=b"raw file contents",
+ metadata="metadata",
+ )
+ assert_matches_type(IngestionResponse, document, path=["response"])
+
@parametrize
def test_raw_response_ingest(self, client: ContextualAI) -> None:
response = client.datastores.documents.with_raw_response.ingest(
@@ -210,6 +219,63 @@ def test_path_params_metadata(self, client: ContextualAI) -> None:
datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
+ @parametrize
+ def test_method_set_metadata(self, client: ContextualAI) -> None:
+ document = client.datastores.documents.set_metadata(
+ document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(DocumentMetadata, document, path=["response"])
+
+ @parametrize
+ def test_method_set_metadata_with_all_params(self, client: ContextualAI) -> None:
+ document = client.datastores.documents.set_metadata(
+ document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ custom_metadata={"foo": True},
+ )
+ assert_matches_type(DocumentMetadata, document, path=["response"])
+
+ @parametrize
+ def test_raw_response_set_metadata(self, client: ContextualAI) -> None:
+ response = client.datastores.documents.with_raw_response.set_metadata(
+ document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ document = response.parse()
+ assert_matches_type(DocumentMetadata, document, path=["response"])
+
+ @parametrize
+ def test_streaming_response_set_metadata(self, client: ContextualAI) -> None:
+ with client.datastores.documents.with_streaming_response.set_metadata(
+ document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ document = response.parse()
+ assert_matches_type(DocumentMetadata, document, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_set_metadata(self, client: ContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `datastore_id` but received ''"):
+ client.datastores.documents.with_raw_response.set_metadata(
+ document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `document_id` but received ''"):
+ client.datastores.documents.with_raw_response.set_metadata(
+ document_id="",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
class TestAsyncDocuments:
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -320,6 +386,15 @@ async def test_method_ingest(self, async_client: AsyncContextualAI) -> None:
)
assert_matches_type(IngestionResponse, document, path=["response"])
+ @parametrize
+ async def test_method_ingest_with_all_params(self, async_client: AsyncContextualAI) -> None:
+ document = await async_client.datastores.documents.ingest(
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ file=b"raw file contents",
+ metadata="metadata",
+ )
+ assert_matches_type(IngestionResponse, document, path=["response"])
+
@parametrize
async def test_raw_response_ingest(self, async_client: AsyncContextualAI) -> None:
response = await async_client.datastores.documents.with_raw_response.ingest(
@@ -401,3 +476,60 @@ async def test_path_params_metadata(self, async_client: AsyncContextualAI) -> No
document_id="",
datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
+
+ @parametrize
+ async def test_method_set_metadata(self, async_client: AsyncContextualAI) -> None:
+ document = await async_client.datastores.documents.set_metadata(
+ document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(DocumentMetadata, document, path=["response"])
+
+ @parametrize
+ async def test_method_set_metadata_with_all_params(self, async_client: AsyncContextualAI) -> None:
+ document = await async_client.datastores.documents.set_metadata(
+ document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ custom_metadata={"foo": True},
+ )
+ assert_matches_type(DocumentMetadata, document, path=["response"])
+
+ @parametrize
+ async def test_raw_response_set_metadata(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.datastores.documents.with_raw_response.set_metadata(
+ document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ document = await response.parse()
+ assert_matches_type(DocumentMetadata, document, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_set_metadata(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.datastores.documents.with_streaming_response.set_metadata(
+ document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ document = await response.parse()
+ assert_matches_type(DocumentMetadata, document, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_set_metadata(self, async_client: AsyncContextualAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `datastore_id` but received ''"):
+ await async_client.datastores.documents.with_raw_response.set_metadata(
+ document_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ datastore_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `document_id` but received ''"):
+ await async_client.datastores.documents.with_raw_response.set_metadata(
+ document_id="",
+ datastore_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py
index 30eb7dbd..7116967f 100644
--- a/tests/api_resources/test_agents.py
+++ b/tests/api_resources/test_agents.py
@@ -33,6 +33,26 @@ def test_method_create(self, client: ContextualAI) -> None:
def test_method_create_with_all_params(self, client: ContextualAI) -> None:
agent = client.agents.create(
name="xxx",
+ agent_configs={
+ "filter_and_rerank_config": {"top_k_reranked_chunks": 0},
+ "generate_response_config": {
+ "frequency_penalty": 0,
+ "max_new_tokens": 0,
+ "seed": 0,
+ "temperature": 0,
+ "top_p": 0,
+ },
+ "global_config": {
+ "enable_filter": True,
+ "enable_multi_turn": True,
+ "enable_rerank": True,
+ },
+ "retrieval_config": {
+ "lexical_alpha": 0,
+ "semantic_alpha": 0,
+ "top_k_retrieved_chunks": 0,
+ },
+ },
datastore_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"],
description="xxx",
suggested_queries=["string"],
@@ -75,6 +95,26 @@ def test_method_update(self, client: ContextualAI) -> None:
def test_method_update_with_all_params(self, client: ContextualAI) -> None:
agent = client.agents.update(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ agent_configs={
+ "filter_and_rerank_config": {"top_k_reranked_chunks": 0},
+ "generate_response_config": {
+ "frequency_penalty": 0,
+ "max_new_tokens": 0,
+ "seed": 0,
+ "temperature": 0,
+ "top_p": 0,
+ },
+ "global_config": {
+ "enable_filter": True,
+ "enable_multi_turn": True,
+ "enable_rerank": True,
+ },
+ "retrieval_config": {
+ "lexical_alpha": 0,
+ "semantic_alpha": 0,
+ "top_k_retrieved_chunks": 0,
+ },
+ },
datastore_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"],
llm_model_id="llm_model_id",
suggested_queries=["string"],
@@ -237,6 +277,26 @@ async def test_method_create(self, async_client: AsyncContextualAI) -> None:
async def test_method_create_with_all_params(self, async_client: AsyncContextualAI) -> None:
agent = await async_client.agents.create(
name="xxx",
+ agent_configs={
+ "filter_and_rerank_config": {"top_k_reranked_chunks": 0},
+ "generate_response_config": {
+ "frequency_penalty": 0,
+ "max_new_tokens": 0,
+ "seed": 0,
+ "temperature": 0,
+ "top_p": 0,
+ },
+ "global_config": {
+ "enable_filter": True,
+ "enable_multi_turn": True,
+ "enable_rerank": True,
+ },
+ "retrieval_config": {
+ "lexical_alpha": 0,
+ "semantic_alpha": 0,
+ "top_k_retrieved_chunks": 0,
+ },
+ },
datastore_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"],
description="xxx",
suggested_queries=["string"],
@@ -279,6 +339,26 @@ async def test_method_update(self, async_client: AsyncContextualAI) -> None:
async def test_method_update_with_all_params(self, async_client: AsyncContextualAI) -> None:
agent = await async_client.agents.update(
agent_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ agent_configs={
+ "filter_and_rerank_config": {"top_k_reranked_chunks": 0},
+ "generate_response_config": {
+ "frequency_penalty": 0,
+ "max_new_tokens": 0,
+ "seed": 0,
+ "temperature": 0,
+ "top_p": 0,
+ },
+ "global_config": {
+ "enable_filter": True,
+ "enable_multi_turn": True,
+ "enable_rerank": True,
+ },
+ "retrieval_config": {
+ "lexical_alpha": 0,
+ "semantic_alpha": 0,
+ "top_k_retrieved_chunks": 0,
+ },
+ },
datastore_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"],
llm_model_id="llm_model_id",
suggested_queries=["string"],
diff --git a/tests/api_resources/test_generate.py b/tests/api_resources/test_generate.py
index 7cbf83f2..2a379f22 100644
--- a/tests/api_resources/test_generate.py
+++ b/tests/api_resources/test_generate.py
@@ -42,6 +42,7 @@ def test_method_create_with_all_params(self, client: ContextualAI) -> None:
}
],
model="model",
+ avoid_commentary=True,
system_prompt="system_prompt",
)
assert_matches_type(GenerateCreateResponse, generate, path=["response"])
@@ -113,6 +114,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncContextual
}
],
model="model",
+ avoid_commentary=True,
system_prompt="system_prompt",
)
assert_matches_type(GenerateCreateResponse, generate, path=["response"])
diff --git a/tests/api_resources/test_rerank.py b/tests/api_resources/test_rerank.py
index cd4e4f76..6f97de08 100644
--- a/tests/api_resources/test_rerank.py
+++ b/tests/api_resources/test_rerank.py
@@ -22,7 +22,7 @@ def test_method_create(self, client: ContextualAI) -> None:
rerank = client.rerank.create(
documents=["string"],
model="model",
- query="x",
+ query="query",
)
assert_matches_type(RerankCreateResponse, rerank, path=["response"])
@@ -31,7 +31,9 @@ def test_method_create_with_all_params(self, client: ContextualAI) -> None:
rerank = client.rerank.create(
documents=["string"],
model="model",
- query="x",
+ query="query",
+ instruction="instruction",
+ metadata=["string"],
top_n=0,
)
assert_matches_type(RerankCreateResponse, rerank, path=["response"])
@@ -41,7 +43,7 @@ def test_raw_response_create(self, client: ContextualAI) -> None:
response = client.rerank.with_raw_response.create(
documents=["string"],
model="model",
- query="x",
+ query="query",
)
assert response.is_closed is True
@@ -54,7 +56,7 @@ def test_streaming_response_create(self, client: ContextualAI) -> None:
with client.rerank.with_streaming_response.create(
documents=["string"],
model="model",
- query="x",
+ query="query",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -73,7 +75,7 @@ async def test_method_create(self, async_client: AsyncContextualAI) -> None:
rerank = await async_client.rerank.create(
documents=["string"],
model="model",
- query="x",
+ query="query",
)
assert_matches_type(RerankCreateResponse, rerank, path=["response"])
@@ -82,7 +84,9 @@ async def test_method_create_with_all_params(self, async_client: AsyncContextual
rerank = await async_client.rerank.create(
documents=["string"],
model="model",
- query="x",
+ query="query",
+ instruction="instruction",
+ metadata=["string"],
top_n=0,
)
assert_matches_type(RerankCreateResponse, rerank, path=["response"])
@@ -92,7 +96,7 @@ async def test_raw_response_create(self, async_client: AsyncContextualAI) -> Non
response = await async_client.rerank.with_raw_response.create(
documents=["string"],
model="model",
- query="x",
+ query="query",
)
assert response.is_closed is True
@@ -105,7 +109,7 @@ async def test_streaming_response_create(self, async_client: AsyncContextualAI)
async with async_client.rerank.with_streaming_response.create(
documents=["string"],
model="model",
- query="x",
+ query="query",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/test_users.py b/tests/api_resources/test_users.py
new file mode 100644
index 00000000..61e7ffe2
--- /dev/null
+++ b/tests/api_resources/test_users.py
@@ -0,0 +1,319 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from contextual import ContextualAI, AsyncContextualAI
+from tests.utils import assert_matches_type
+from contextual.types import (
+ ListUsersResponse,
+ InviteUsersResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestUsers:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_update(self, client: ContextualAI) -> None:
+ user = client.users.update(
+ email="email",
+ )
+ assert_matches_type(object, user, path=["response"])
+
+ @parametrize
+ def test_method_update_with_all_params(self, client: ContextualAI) -> None:
+ user = client.users.update(
+ email="email",
+ is_tenant_admin=True,
+ per_agent_roles=[
+ {
+ "agent_id": "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ "grant": True,
+ "roles": ["AGENT_USER"],
+ }
+ ],
+ roles=["AGENT_USER"],
+ )
+ assert_matches_type(object, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_update(self, client: ContextualAI) -> None:
+ response = client.users.with_raw_response.update(
+ email="email",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(object, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_update(self, client: ContextualAI) -> None:
+ with client.users.with_streaming_response.update(
+ email="email",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(object, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_list(self, client: ContextualAI) -> None:
+ user = client.users.list()
+ assert_matches_type(ListUsersResponse, user, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: ContextualAI) -> None:
+ user = client.users.list(
+ cursor="cursor",
+ deactivated=True,
+ limit=0,
+ search="search",
+ )
+ assert_matches_type(ListUsersResponse, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: ContextualAI) -> None:
+ response = client.users.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(ListUsersResponse, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: ContextualAI) -> None:
+ with client.users.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(ListUsersResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_deactivate(self, client: ContextualAI) -> None:
+ user = client.users.deactivate(
+ email="email",
+ )
+ assert_matches_type(object, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_deactivate(self, client: ContextualAI) -> None:
+ response = client.users.with_raw_response.deactivate(
+ email="email",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(object, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_deactivate(self, client: ContextualAI) -> None:
+ with client.users.with_streaming_response.deactivate(
+ email="email",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(object, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_invite(self, client: ContextualAI) -> None:
+ user = client.users.invite(
+ new_users=[{"email": "email"}],
+ tenant_short_name="tenant_short_name",
+ )
+ assert_matches_type(InviteUsersResponse, user, path=["response"])
+
+ @parametrize
+ def test_raw_response_invite(self, client: ContextualAI) -> None:
+ response = client.users.with_raw_response.invite(
+ new_users=[{"email": "email"}],
+ tenant_short_name="tenant_short_name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = response.parse()
+ assert_matches_type(InviteUsersResponse, user, path=["response"])
+
+ @parametrize
+ def test_streaming_response_invite(self, client: ContextualAI) -> None:
+ with client.users.with_streaming_response.invite(
+ new_users=[{"email": "email"}],
+ tenant_short_name="tenant_short_name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = response.parse()
+ assert_matches_type(InviteUsersResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncUsers:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_update(self, async_client: AsyncContextualAI) -> None:
+ user = await async_client.users.update(
+ email="email",
+ )
+ assert_matches_type(object, user, path=["response"])
+
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncContextualAI) -> None:
+ user = await async_client.users.update(
+ email="email",
+ is_tenant_admin=True,
+ per_agent_roles=[
+ {
+ "agent_id": "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ "grant": True,
+ "roles": ["AGENT_USER"],
+ }
+ ],
+ roles=["AGENT_USER"],
+ )
+ assert_matches_type(object, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.users.with_raw_response.update(
+ email="email",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = await response.parse()
+ assert_matches_type(object, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.users.with_streaming_response.update(
+ email="email",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(object, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncContextualAI) -> None:
+ user = await async_client.users.list()
+ assert_matches_type(ListUsersResponse, user, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncContextualAI) -> None:
+ user = await async_client.users.list(
+ cursor="cursor",
+ deactivated=True,
+ limit=0,
+ search="search",
+ )
+ assert_matches_type(ListUsersResponse, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.users.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = await response.parse()
+ assert_matches_type(ListUsersResponse, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.users.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(ListUsersResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_deactivate(self, async_client: AsyncContextualAI) -> None:
+ user = await async_client.users.deactivate(
+ email="email",
+ )
+ assert_matches_type(object, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_deactivate(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.users.with_raw_response.deactivate(
+ email="email",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = await response.parse()
+ assert_matches_type(object, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_deactivate(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.users.with_streaming_response.deactivate(
+ email="email",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(object, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_invite(self, async_client: AsyncContextualAI) -> None:
+ user = await async_client.users.invite(
+ new_users=[{"email": "email"}],
+ tenant_short_name="tenant_short_name",
+ )
+ assert_matches_type(InviteUsersResponse, user, path=["response"])
+
+ @parametrize
+ async def test_raw_response_invite(self, async_client: AsyncContextualAI) -> None:
+ response = await async_client.users.with_raw_response.invite(
+ new_users=[{"email": "email"}],
+ tenant_short_name="tenant_short_name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ user = await response.parse()
+ assert_matches_type(InviteUsersResponse, user, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_invite(self, async_client: AsyncContextualAI) -> None:
+ async with async_client.users.with_streaming_response.invite(
+ new_users=[{"email": "email"}],
+ tenant_short_name="tenant_short_name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ user = await response.parse()
+ assert_matches_type(InviteUsersResponse, user, path=["response"])
+
+ assert cast(Any, response.is_closed) is True