Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 7 additions & 6 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -71,17 +71,18 @@ jobs:
poetry install --with test,test_integration

echo "Going to uninstall cohere package, and install the current version from the repo"
cd ../../../cohere-python && echo "Current dir $(pwd)"
echo "Current dir $(ls)"
pip uninstall cohere
pip install .
cd ../langchain-cohere/libs/cohere

echo "Current cohere installation: $(pip freeze | grep cohere)"
poetry remove cohere
poetry add ../../../cohere-python

echo "Current cohere installation: $(poetry show cohere)"
make test
make integration_test
echo "tests passed"

# reset poetry changes
git checkout -- poetry.lock pyproject.toml

set -eu

STATUS="$(git status)"
Expand Down
144 changes: 71 additions & 73 deletions poetry.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ name = "cohere"

[tool.poetry]
name = "cohere"
version = "5.16.1"
version = "5.16.2"
description = ""
readme = "README.md"
authors = []
Expand Down
4 changes: 4 additions & 0 deletions src/cohere/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
ChatContentStartEventDelta,
ChatContentStartEventDeltaMessage,
ChatContentStartEventDeltaMessageContent,
ChatContentStartEventDeltaMessageContentType,
ChatDataMetrics,
ChatDebugEvent,
ChatDocument,
Expand Down Expand Up @@ -205,6 +206,7 @@
TextResponseFormatV2,
TextSystemMessageV2ContentItem,
TextToolContent,
ThinkingAssistantMessageResponseContentItem,
TokenizeResponse,
Tool,
ToolCall,
Expand Down Expand Up @@ -325,6 +327,7 @@
"ChatContentStartEventDelta",
"ChatContentStartEventDeltaMessage",
"ChatContentStartEventDeltaMessageContent",
"ChatContentStartEventDeltaMessageContentType",
"ChatDataMetrics",
"ChatDebugEvent",
"ChatDocument",
Expand Down Expand Up @@ -536,6 +539,7 @@
"TextResponseFormatV2",
"TextSystemMessageV2ContentItem",
"TextToolContent",
"ThinkingAssistantMessageResponseContentItem",
"TokenizeResponse",
"TooManyRequestsError",
"Tool",
Expand Down
32 changes: 32 additions & 0 deletions src/cohere/base_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,7 @@ def chat_stream(
*,
message: str,
accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
raw_prompting: typing.Optional[bool] = OMIT,
model: typing.Optional[str] = OMIT,
preamble: typing.Optional[str] = OMIT,
chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
Expand Down Expand Up @@ -186,6 +187,12 @@ def chat_stream(
accepts : typing.Optional[typing.Literal["text/event-stream"]]
Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.

raw_prompting : typing.Optional[bool]
When enabled, the user's prompt will be sent to the model without
any pre-processing.

Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments

model : typing.Optional[str]
The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.

Expand Down Expand Up @@ -407,6 +414,7 @@ def chat_stream(
with self._raw_client.chat_stream(
message=message,
accepts=accepts,
raw_prompting=raw_prompting,
model=model,
preamble=preamble,
chat_history=chat_history,
Expand Down Expand Up @@ -439,6 +447,7 @@ def chat(
*,
message: str,
accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
raw_prompting: typing.Optional[bool] = OMIT,
model: typing.Optional[str] = OMIT,
preamble: typing.Optional[str] = OMIT,
chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
Expand Down Expand Up @@ -478,6 +487,12 @@ def chat(
accepts : typing.Optional[typing.Literal["text/event-stream"]]
Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.

raw_prompting : typing.Optional[bool]
When enabled, the user's prompt will be sent to the model without
any pre-processing.

Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments

model : typing.Optional[str]
The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.

Expand Down Expand Up @@ -710,6 +725,7 @@ def chat(
_response = self._raw_client.chat(
message=message,
accepts=accepts,
raw_prompting=raw_prompting,
model=model,
preamble=preamble,
chat_history=chat_history,
Expand Down Expand Up @@ -1586,6 +1602,7 @@ async def chat_stream(
*,
message: str,
accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
raw_prompting: typing.Optional[bool] = OMIT,
model: typing.Optional[str] = OMIT,
preamble: typing.Optional[str] = OMIT,
chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
Expand Down Expand Up @@ -1626,6 +1643,12 @@ async def chat_stream(
accepts : typing.Optional[typing.Literal["text/event-stream"]]
Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.

raw_prompting : typing.Optional[bool]
When enabled, the user's prompt will be sent to the model without
any pre-processing.

Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments

model : typing.Optional[str]
The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.

Expand Down Expand Up @@ -1855,6 +1878,7 @@ async def main() -> None:
async with self._raw_client.chat_stream(
message=message,
accepts=accepts,
raw_prompting=raw_prompting,
model=model,
preamble=preamble,
chat_history=chat_history,
Expand Down Expand Up @@ -1888,6 +1912,7 @@ async def chat(
*,
message: str,
accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
raw_prompting: typing.Optional[bool] = OMIT,
model: typing.Optional[str] = OMIT,
preamble: typing.Optional[str] = OMIT,
chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
Expand Down Expand Up @@ -1927,6 +1952,12 @@ async def chat(
accepts : typing.Optional[typing.Literal["text/event-stream"]]
Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.

raw_prompting : typing.Optional[bool]
When enabled, the user's prompt will be sent to the model without
any pre-processing.

Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments

model : typing.Optional[str]
The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.

Expand Down Expand Up @@ -2167,6 +2198,7 @@ async def main() -> None:
_response = await self._raw_client.chat(
message=message,
accepts=accepts,
raw_prompting=raw_prompting,
model=model,
preamble=preamble,
chat_history=chat_history,
Expand Down
4 changes: 2 additions & 2 deletions src/cohere/core/client_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,10 @@ def __init__(

def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"User-Agent": "cohere/5.16.1",
"User-Agent": "cohere/5.16.2",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "cohere",
"X-Fern-SDK-Version": "5.16.1",
"X-Fern-SDK-Version": "5.16.2",
}
if self._client_name is not None:
headers["X-Client-Name"] = self._client_name
Expand Down
38 changes: 38 additions & 0 deletions src/cohere/finetuning/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,8 @@ def list_finetuned_models(
request_options: typing.Optional[RequestOptions] = None,
) -> ListFinetunedModelsResponse:
"""
Returns a list of fine-tuned models that the user has access to.

Parameters
----------
page_size : typing.Optional[int]
Expand Down Expand Up @@ -88,6 +90,8 @@ def create_finetuned_model(
self, *, request: FinetunedModel, request_options: typing.Optional[RequestOptions] = None
) -> CreateFinetunedModelResponse:
"""
Creates a new fine-tuned model. The model will be trained on the dataset specified in the request body. The training process may take some time, and the model will be available once the training is complete.

Parameters
----------
request : FinetunedModel
Expand Down Expand Up @@ -128,6 +132,8 @@ def get_finetuned_model(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> GetFinetunedModelResponse:
"""
Retrieves a fine-tuned model by its ID.

Parameters
----------
id : str
Expand Down Expand Up @@ -160,6 +166,9 @@ def delete_finetuned_model(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> DeleteFinetunedModelResponse:
"""
Deletes a fine-tuned model. The model will be removed from the system and will no longer be available for use.
This operation is irreversible.

Parameters
----------
id : str
Expand Down Expand Up @@ -198,6 +207,8 @@ def update_finetuned_model(
request_options: typing.Optional[RequestOptions] = None,
) -> UpdateFinetunedModelResponse:
"""
Updates the fine-tuned model with the given ID. The model will be updated with the new settings and name provided in the request body.

Parameters
----------
id : str
Expand Down Expand Up @@ -255,6 +266,10 @@ def list_events(
request_options: typing.Optional[RequestOptions] = None,
) -> ListEventsResponse:
"""
Returns a list of events that occurred during the life-cycle of the fine-tuned model.
The events are ordered by creation time, with the most recent event first.
The list can be paginated using `page_size` and `page_token` parameters.

Parameters
----------
finetuned_model_id : str
Expand Down Expand Up @@ -313,6 +328,10 @@ def list_training_step_metrics(
request_options: typing.Optional[RequestOptions] = None,
) -> ListTrainingStepMetricsResponse:
"""
Returns a list of metrics measured during the training of a fine-tuned model.
The metrics are ordered by step number, with the most recent step first.
The list can be paginated using `page_size` and `page_token` parameters.

Parameters
----------
finetuned_model_id : str
Expand Down Expand Up @@ -375,6 +394,8 @@ async def list_finetuned_models(
request_options: typing.Optional[RequestOptions] = None,
) -> ListFinetunedModelsResponse:
"""
Returns a list of fine-tuned models that the user has access to.

Parameters
----------
page_size : typing.Optional[int]
Expand Down Expand Up @@ -427,6 +448,8 @@ async def create_finetuned_model(
self, *, request: FinetunedModel, request_options: typing.Optional[RequestOptions] = None
) -> CreateFinetunedModelResponse:
"""
Creates a new fine-tuned model. The model will be trained on the dataset specified in the request body. The training process may take some time, and the model will be available once the training is complete.

Parameters
----------
request : FinetunedModel
Expand Down Expand Up @@ -475,6 +498,8 @@ async def get_finetuned_model(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> GetFinetunedModelResponse:
"""
Retrieves a fine-tuned model by its ID.

Parameters
----------
id : str
Expand Down Expand Up @@ -515,6 +540,9 @@ async def delete_finetuned_model(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> DeleteFinetunedModelResponse:
"""
Deletes a fine-tuned model. The model will be removed from the system and will no longer be available for use.
This operation is irreversible.

Parameters
----------
id : str
Expand Down Expand Up @@ -561,6 +589,8 @@ async def update_finetuned_model(
request_options: typing.Optional[RequestOptions] = None,
) -> UpdateFinetunedModelResponse:
"""
Updates the fine-tuned model with the given ID. The model will be updated with the new settings and name provided in the request body.

Parameters
----------
id : str
Expand Down Expand Up @@ -626,6 +656,10 @@ async def list_events(
request_options: typing.Optional[RequestOptions] = None,
) -> ListEventsResponse:
"""
Returns a list of events that occurred during the life-cycle of the fine-tuned model.
The events are ordered by creation time, with the most recent event first.
The list can be paginated using `page_size` and `page_token` parameters.

Parameters
----------
finetuned_model_id : str
Expand Down Expand Up @@ -692,6 +726,10 @@ async def list_training_step_metrics(
request_options: typing.Optional[RequestOptions] = None,
) -> ListTrainingStepMetricsResponse:
"""
Returns a list of metrics measured during the training of a fine-tuned model.
The metrics are ordered by step number, with the most recent step first.
The list can be paginated using `page_size` and `page_token` parameters.

Parameters
----------
finetuned_model_id : str
Expand Down
Loading