2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "zai-sdk"
version = "0.0.3.2"
version = "0.0.3.3"
description = "A SDK library for accessing big model apis from Z.ai"
authors = ["Z.ai"]
readme = "README.md"
2 changes: 1 addition & 1 deletion src/zai/_version.py
@@ -1,2 +1,2 @@
__title__ = 'Z.ai'
__version__ = '0.0.3.2'
__version__ = '0.0.3.3'
3 changes: 3 additions & 0 deletions src/zai/api_resource/audio/audio.py
@@ -103,6 +103,7 @@ def customization(
extra_headers: Headers | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
watermark_enabled: Optional[bool] | NotGiven = NOT_GIVEN,
) -> HttpxBinaryResponseContent:
"""
Generate customized speech audio with voice cloning
@@ -119,6 +120,7 @@ def customization(
extra_headers (Headers): Additional headers to send
extra_body (Body): Additional body parameters
timeout (float | httpx.Timeout): Request timeout
watermark_enabled (Optional[bool]): Whether to enable watermark on generated audio
"""
body = deepcopy_minimal(
{
@@ -130,6 +132,7 @@
'sensitive_word_check': sensitive_word_check,
'request_id': request_id,
'user_id': user_id,
'watermark_enabled': watermark_enabled,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[['voice_data']])
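A minimal usage sketch for the new flag on voice cloning, assuming the client is exposed as `ZaiClient` and that the remaining `customization` arguments match the parts of the signature this diff does not show; everything except `watermark_enabled` is a placeholder.

```python
from zai import ZaiClient  # assumed client entry point

client = ZaiClient(api_key="your-api-key")

# Voice-cloning request; only watermark_enabled comes from this diff,
# the other arguments stand in for the existing (elided) parameters.
audio = client.audio.customization(
    model="cogtts-clone",                   # hypothetical model id
    voice_data=open("reference.wav", "rb"),
    watermark_enabled=True,                 # new in 0.0.3.3; defaults to NOT_GIVEN when omitted
)
with open("output.wav", "wb") as f:
    f.write(audio.content)  # binary response content attribute assumed
```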
3 changes: 3 additions & 0 deletions src/zai/api_resource/chat/async_completions.py
@@ -59,6 +59,7 @@ def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
response_format: object | None = None,
thinking: object | None = None,
watermark_enabled: Optional[bool] | NotGiven = NOT_GIVEN,
) -> AsyncTaskStatus:
"""
Create an asynchronous chat completion task
@@ -84,6 +85,7 @@ def create(
timeout (float | httpx.Timeout): Request timeout
response_format (Optional[object]): Response format specification
thinking (Optional[object]): Configuration parameters for model reasoning
watermark_enabled (Optional[bool]): Whether to enable watermark on generated content
"""
_cast_type = AsyncTaskStatus
logger.debug(f'temperature:{temperature}, top_p:{top_p}')
@@ -128,6 +130,7 @@ def create(
'extra': maybe_transform(extra, code_geex_params.CodeGeexExtra),
'response_format': response_format,
'thinking': thinking,
'watermark_enabled': watermark_enabled,
}
return self._post(
'/async/chat/completions',
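A sketch of submitting an asynchronous chat completion with the flag; the `asyncCompletions` attribute name and the returned `AsyncTaskStatus` fields are assumptions not shown in this diff.

```python
from zai import ZaiClient  # assumed client entry point

client = ZaiClient(api_key="your-api-key")

# Submit an async chat completion task; watermark_enabled is forwarded
# in the request body exactly as added in the diff above.
task = client.chat.asyncCompletions.create(  # resource attribute name assumed
    model="glm-4",                           # placeholder model id
    messages=[{"role": "user", "content": "Summarize the 0.0.3.3 release."}],
    watermark_enabled=False,
)
print(task.id, task.task_status)  # AsyncTaskStatus fields assumed
```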
3 changes: 3 additions & 0 deletions src/zai/api_resource/chat/completions.py
@@ -65,6 +65,7 @@ def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
response_format: object | None = None,
thinking: object | None = None,
watermark_enabled: Optional[bool] | NotGiven = NOT_GIVEN,
) -> Completion | StreamResponse[ChatCompletionChunk]:
"""
Create a chat completion
@@ -91,6 +92,7 @@ def create(
timeout (float | httpx.Timeout): Request timeout
response_format (object): Response format specification
thinking (Optional[object]): Configuration parameters for model reasoning
watermark_enabled (Optional[bool]): Whether to enable watermark on generated content
"""
logger.debug(f'temperature:{temperature}, top_p:{top_p}')
if temperature is not None and temperature != NOT_GIVEN:
@@ -138,6 +140,7 @@ def create(
'extra': maybe_transform(extra, code_geex_params.CodeGeexExtra),
'response_format': response_format,
'thinking': thinking,
'watermark_enabled': watermark_enabled,
}
)
return self._post(
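For the synchronous endpoint the parameter rides along in the same way; a minimal sketch, again assuming `ZaiClient` and a placeholder GLM model id:

```python
from zai import ZaiClient  # assumed client entry point

client = ZaiClient(api_key="your-api-key")

response = client.chat.completions.create(
    model="glm-4",  # placeholder model id
    messages=[{"role": "user", "content": "Write a haiku about rivers."}],
    watermark_enabled=True,  # optional; defaults to NOT_GIVEN when omitted
)
print(response.choices[0].message.content)  # Completion shape assumed
```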
3 changes: 3 additions & 0 deletions src/zai/api_resource/images/images.py
@@ -38,6 +38,7 @@ def generations(
extra_body: Body | None = None,
disable_strict_validation: Optional[bool] | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
watermark_enabled: Optional[bool] | NotGiven = NOT_GIVEN,
) -> ImagesResponded:
"""
Generate images from text prompts
@@ -58,6 +59,7 @@ def generations(
extra_body (Body): Additional body parameters
disable_strict_validation (Optional[bool]): Whether to disable strict validation
timeout (float | httpx.Timeout): Request timeout
watermark_enabled (Optional[bool]): Whether to enable watermark on generated images
"""
_cast_type = ImagesResponded
if disable_strict_validation:
@@ -76,6 +78,7 @@
'user': user,
'user_id': user_id,
'request_id': request_id,
'watermark_enabled': watermark_enabled,
},
options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout),
cast_type=_cast_type,
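An equivalent sketch for image generation; the `prompt` argument and model id are placeholders for parameters outside this excerpt.

```python
from zai import ZaiClient  # assumed client entry point

client = ZaiClient(api_key="your-api-key")

result = client.images.generations(
    model="cogview-3",                         # placeholder model id
    prompt="a watercolor lighthouse at dawn",  # prompt parameter assumed from the full signature
    watermark_enabled=False,                   # request un-watermarked output where the API permits it
)
print(result.data[0].url)  # ImagesResponded shape assumed
```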
3 changes: 3 additions & 0 deletions src/zai/api_resource/videos/videos.py
@@ -49,6 +49,7 @@ def generations(
extra_headers: Headers | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
watermark_enabled: Optional[bool] | NotGiven = NOT_GIVEN,
) -> VideoObject:
"""
Generate videos from text prompts or images
@@ -71,6 +72,7 @@ def generations(
extra_headers (Headers): Additional headers to send
extra_body (Body): Additional body parameters
timeout (float | httpx.Timeout): Request timeout
watermark_enabled (Optional[bool]): Whether to enable watermark on generated videos
"""
if not model:
raise ValueError('`model` must be provided.')
@@ -90,6 +92,7 @@
'sensitive_word_check': sensitive_word_check,
'request_id': request_id,
'user_id': user_id,
'watermark_enabled': watermark_enabled,
}
)
return self._post(
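Finally, the same flag on video generation, which returns a task object to poll later; the retrieval step and the model id are not part of this diff.

```python
from zai import ZaiClient  # assumed client entry point

client = ZaiClient(api_key="your-api-key")

video_task = client.videos.generations(
    model="cogvideox",                            # placeholder model id
    prompt="waves rolling onto a beach at dusk",  # prompt parameter assumed
    watermark_enabled=True,
)
print(video_task.id)  # VideoObject id; poll the retrieval endpoint (not shown here) for the result
```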