8 changes: 4 additions & 4 deletions README.md
@@ -169,7 +169,7 @@ client = ZaiClient(api_key="your-api-key")

# Create chat completion
response = client.chat.completions.create(
-model='glm-4',
+model='glm-4.6',
messages=[
{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'user', 'content': 'Tell me a story about AI.'},
@@ -192,7 +192,7 @@ client = ZaiClient(api_key="your-api-key")

# Create chat completion
response = client.chat.completions.create(
-model='glm-4',
+model='glm-4.6',
messages=[
{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'user', 'content': 'What is artificial intelligence?'},
@@ -228,7 +228,7 @@ client = ZaiClient(api_key="your-api-key")
base64_image = encode_image('examples/test_multi_modal.jpeg')

response = client.chat.completions.create(
-model='glm-4v',
+model='glm-4.6v',
messages=[
{
'role': 'user',
@@ -338,7 +338,7 @@ client = ZaiClient(api_key="your-api-key")

try:
response = client.chat.completions.create(
model="glm-4",
model="glm-4.6",
messages=[
{"role": "user", "content": "Hello, Z.ai!"}
]
10 changes: 5 additions & 5 deletions README_CN.md
@@ -107,7 +107,7 @@ client = ZhipuAiClient(api_key="your-api-key")

# Create chat completion
response = client.chat.completions.create(
model="glm-4",
model="glm-4.6",
messages=[
{"role": "user", "content": "Hello, Z.ai!"}
]
@@ -175,7 +175,7 @@ client = ZaiClient(api_key="your-api-key")

# Create chat completion
response = client.chat.completions.create(
-model='glm-4',
+model='glm-4.6',
messages=[
{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'user', 'content': 'Tell me a story about AI.'},
@@ -198,7 +198,7 @@ client = ZaiClient(api_key="your-api-key")

# Create chat completion
response = client.chat.completions.create(
-model='glm-4',
+model='glm-4.6',
messages=[
{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'user', 'content': 'What is artificial intelligence?'},
@@ -234,7 +234,7 @@ client = ZaiClient(api_key="your-api-key")
base64_image = encode_image('examples/test_multi_modal.jpeg')

response = client.chat.completions.create(
-model='glm-4v',
+model='glm-4.6v',
messages=[
{
'role': 'user',
@@ -344,7 +344,7 @@ client = ZaiClient(api_key="your-api-key")  # Please fill in your own API key

try:
response = client.chat.completions.create(
model="glm-4",
model="glm-4.6",
messages=[
{"role": "user", "content": "你好, Z.ai !"}
]
2 changes: 1 addition & 1 deletion examples/basic_usage.py
@@ -126,7 +126,7 @@ def encode_image(image_path):
base64_image = encode_image('examples/test_multi_modal.jpeg')

response = client.chat.completions.create(
-model='glm-4v',
+model='glm-4.6v',
messages=[
{
'role': 'user',
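Note (not part of this diff): the `encode_image` helper referenced in the multimodal examples above is presumably just a base64 file reader. A minimal sketch of what it is assumed to look like:

```python
import base64

def encode_image(image_path):
    """Read an image file and return its contents as a base64-encoded string."""
    with open(image_path, 'rb') as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

# Assumed usage, matching the examples above:
# base64_image = encode_image('examples/test_multi_modal.jpeg')
```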
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "zai-sdk"
version = "0.0.4.3"
version = "0.1.0"
description = "A SDK library for accessing big model apis from Z.ai"
authors = ["Z.ai"]
readme = "README.md"
2 changes: 1 addition & 1 deletion src/zai/_version.py
@@ -1,2 +1,2 @@
__title__ = 'Z.ai'
-__version__ = '0.0.4.3'
+__version__ = '0.1.0'
2 changes: 1 addition & 1 deletion src/zai/types/chat/chat_completion_chunk.py
@@ -68,7 +68,7 @@ class ChoiceDelta(BaseModel):
Attributes:
content: Content delta
role: Role of the message sender
-reasoning_content: Reasoning content delta
+reasoning_content: Reasoning content delta; it is recommended to include the model's reasoning_content in the next request to achieve better results in multi-turn conversations.
tool_calls: List of tool call deltas
audio: Audio completion chunk
"""
6 changes: 3 additions & 3 deletions tests/integration_tests/test_chat.py
@@ -323,7 +323,7 @@ def test_completions_stream_with_tools(logging_conf):
print(f'request_id:{request_id}')
response = client.chat.completions.create(
request_id=request_id,
-model='glm-4v',  # Fill in the model name to call
+model='glm-4.6v',  # Fill in the model name to call
extra_body={'temperature': 0.5, 'max_tokens': 50},
messages=[
{
@@ -361,7 +361,7 @@ def test_completions_vis_base64(test_file_path, logging_conf):
print(f'request_id:{request_id}')
response = client.chat.completions.create(
request_id=request_id,
-model='glm-4v',  # Fill in the model name to call
+model='glm-4.6v',  # Fill in the model name to call
extra_body={'temperature': 0.5, 'max_tokens': 50},
messages=[
{
@@ -402,7 +402,7 @@ def test_async_completions(logging_conf):
print(f'request_id:{request_id}')
response = client.chat.completions.create(
request_id=request_id,
-model='glm-4v',  # Fill in the model name to call
+model='glm-4.6v',  # Fill in the model name to call
messages=[
{
'role': 'user',