Commit

Updated the ReadMe and made the code a bit cleaner
hello-fri-end committed Mar 29, 2024
1 parent b2addff commit c39d4cb
Showing 2 changed files with 18 additions and 31 deletions.
README.md: 3 changes (1 addition, 2 deletions)

````diff
@@ -87,8 +87,7 @@ asyncio.run(main())
 Functionality wise, the Async and Sync clients are identical.
 
 ## Streaming Responses
-
-To enhance application responsiveness, enable streaming by setting the `stream` parameter to `True` in the `.generate` function. This will produce output in chunks instead of the full output at once.
+You can enable streaming responses by setting `stream=True` in the `.generate` function.
 
 ```python
 import os
````
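
For context, here is a minimal sketch of the streaming usage the updated README line refers to. The client class name (`Unify`), its constructor arguments, the `UNIFY_KEY` environment variable, and the chunk-iteration pattern are assumptions, since the README snippet is truncated in this diff; only the `.generate(..., stream=True)` call itself is confirmed by the change above.

```python
import os

from unify import Unify  # assumed import; the class name is not shown in this diff

# Assumed constructor and environment variable name; neither is shown in this diff.
client = Unify(api_key=os.environ["UNIFY_KEY"])

# stream=True makes .generate yield the response in chunks rather than
# returning the full output at once.
for chunk in client.generate(
    messages="Write a haiku about the ocean.",
    system_prompt="You are a concise assistant.",
    stream=True,
):
    print(chunk, end="", flush=True)
```
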
unify/clients.py: 46 changes (17 additions, 29 deletions)

```diff
@@ -71,20 +71,14 @@ def generate( # noqa: WPS234, WPS211
         Raises:
             UnifyError: If an error occurs during content generation.
         """
+        contents = []
+        if system_prompt:
+            contents.append({"role": "system", "content": system_prompt})
+
         if isinstance(messages, str):
-            if system_prompt is None:
-                contents = [{"role": "user", "content": messages}]
-            else:
-                contents = [
-                    {"role": "system", "content": system_prompt},
-                    {"role": "user", "content": messages},
-                ]
+            contents.append({"role": "user", "content": messages})
         else:
-            if system_prompt is None:
-                contents = messages
-            else:
-                contents = [{"role": "system", "content": system_prompt}]
-                contents.extend(messages)
+            contents.extend(messages)
 
         if stream:
             return self._generate_stream(contents, model, provider)
```
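
Both the sync and async `generate` now assemble the request payload the same way. The standalone sketch below reproduces the refactored logic outside the client class; the `build_contents` helper is hypothetical and exists only for illustration.

```python
from typing import Dict, List, Optional, Union


def build_contents(
    messages: Union[str, List[Dict[str, str]]],
    system_prompt: Optional[str] = None,
) -> List[Dict[str, str]]:
    """Hypothetical helper mirroring the refactored message assembly."""
    contents: List[Dict[str, str]] = []
    # Prepend the system prompt, if one was given.
    if system_prompt:
        contents.append({"role": "system", "content": system_prompt})
    # A plain string becomes a single user message; a list is treated as
    # a full conversation history and appended as-is.
    if isinstance(messages, str):
        contents.append({"role": "user", "content": messages})
    else:
        contents.extend(messages)
    return contents


print(build_contents("Hi!", system_prompt="You are helpful."))
# [{'role': 'system', 'content': 'You are helpful.'},
#  {'role': 'user', 'content': 'Hi!'}]
```

Two subtle behavioural differences from the old branches: the old code checked `system_prompt is None`, so an empty string would still have produced a system message, whereas the new truthiness check skips it; and when `messages` is a list with no system prompt, the old code reused the caller's list directly (`contents = messages`), while the new code always builds a fresh list.
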
```diff
@@ -164,13 +158,12 @@ async def generate( # noqa: WPS234, WPS211
         """Generate content asynchronously using the Unify API.
 
         Args:
-            messages (Union[str, List[Dict[str, str]]]): A single prompt as a string
-                or a dictionary containing the conversation history.
+            messages (Union[str, List[Dict[str, str]]]): A single prompt as a
+                string or a dictionary containing the conversation history.
             system_prompt (Optinal[str]): An optional string containing the
                 system prompt.
-                when messages is a string.
-            model (str): The name of the model.
-            provider (str): The provider of the model.
+            model (str): The name of the model. Defaults to "llama-2-13b-chat".
+            provider (str): The provider of the model. Defaults to "anyscale".
             stream (bool): If True, generates content as a stream.
                 If False, generates content as a single response.
                 Defaults to False.
```
```diff
@@ -183,20 +176,15 @@ async def generate( # noqa: WPS234, WPS211
         Raises:
             UnifyError: If an error occurs during content generation.
         """
+        contents = []
+        if system_prompt:
+            contents.append({"role": "system", "content": system_prompt})
+
         if isinstance(messages, str):
-            if system_prompt is None:
-                contents = [{"role": "user", "content": messages}]
-            else:
-                contents = [
-                    {"role": "system", "content": system_prompt},
-                    {"role": "user", "content": messages},
-                ]
+            contents.append({"role": "user", "content": messages})
         else:
-            if system_prompt is None:
-                contents = messages
-            else:
-                contents = [{"role": "system", "content": system_prompt}]
-                contents.extend(messages)
+            contents.extend(messages)
+
         if stream:
             return self._generate_stream(contents, model, provider)
         return await self._generate_non_stream(contents, model, provider)
```
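
The async docstring above now also spells out the defaults (model "llama-2-13b-chat" served by "anyscale"). A hedged sketch of a call that relies on them follows; the async class name (`AsyncUnify`), its constructor, and the `UNIFY_KEY` environment variable are assumptions not shown in this commit.

```python
import asyncio
import os

from unify import AsyncUnify  # assumed import; only unify/clients.py is shown here


async def main() -> None:
    # Assumed constructor and environment variable name.
    client = AsyncUnify(api_key=os.environ["UNIFY_KEY"])

    # model and provider are omitted, so the documented defaults
    # ("llama-2-13b-chat" on "anyscale") would apply.
    response = await client.generate(
        messages=[{"role": "user", "content": "Summarise this commit."}],
    )
    print(response)


asyncio.run(main())
```
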

0 comments on commit c39d4cb
