From ef1712213d05fb61117d7373c0477329351ae539 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Sat, 29 Nov 2025 11:57:14 +0800 Subject: [PATCH 1/4] feat: support glm-4.6v --- README.md | 2 +- README_CN.md | 2 +- examples/basic_usage.py | 2 +- tests/integration_tests/test_chat.py | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index e218f3f..c313d59 100644 --- a/README.md +++ b/README.md @@ -228,7 +228,7 @@ client = ZaiClient(api_key="your-api-key") base64_image = encode_image('examples/test_multi_modal.jpeg') response = client.chat.completions.create( - model='glm-4v', + model='glm-4.6v', messages=[ { 'role': 'user', diff --git a/README_CN.md b/README_CN.md index e8970dd..474f1bb 100644 --- a/README_CN.md +++ b/README_CN.md @@ -234,7 +234,7 @@ client = ZaiClient(api_key="your-api-key") base64_image = encode_image('examples/test_multi_modal.jpeg') response = client.chat.completions.create( - model='glm-4v', + model='glm-4.6v', messages=[ { 'role': 'user', diff --git a/examples/basic_usage.py b/examples/basic_usage.py index 2cac3cf..493e5de 100644 --- a/examples/basic_usage.py +++ b/examples/basic_usage.py @@ -126,7 +126,7 @@ def encode_image(image_path): base64_image = encode_image('examples/test_multi_modal.jpeg') response = client.chat.completions.create( - model='glm-4v', + model='glm-4.6v', messages=[ { 'role': 'user', diff --git a/tests/integration_tests/test_chat.py b/tests/integration_tests/test_chat.py index 9a189e4..d2b6ea4 100644 --- a/tests/integration_tests/test_chat.py +++ b/tests/integration_tests/test_chat.py @@ -323,7 +323,7 @@ def test_completions_stream_with_tools(logging_conf): print(f'request_id:{request_id}') response = client.chat.completions.create( request_id=request_id, - model='glm-4v', # Fill in the model name to call + model='glm-4.6v', # Fill in the model name to call extra_body={'temperature': 0.5, 'max_tokens': 50}, messages=[ { @@ -361,7 +361,7 @@ def 
test_completions_vis_base64(test_file_path, logging_conf): print(f'request_id:{request_id}') response = client.chat.completions.create( request_id=request_id, - model='glm-4v', # Fill in the model name to call + model='glm-4.6v', # Fill in the model name to call extra_body={'temperature': 0.5, 'max_tokens': 50}, messages=[ { @@ -402,7 +402,7 @@ def test_async_completions(logging_conf): print(f'request_id:{request_id}') response = client.chat.completions.create( request_id=request_id, - model='glm-4v', # Fill in the model name to call + model='glm-4.6v', # Fill in the model name to call messages=[ { 'role': 'user', From d9005c3a338719cb62b03ab5b4b58be6e84b2996 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Sat, 29 Nov 2025 11:58:58 +0800 Subject: [PATCH 2/4] feat: support glm-4.6v --- pyproject.toml | 2 +- src/zai/_version.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0283f77..6746f5d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "zai-sdk" -version = "0.0.4.3" +version = "0.1.0" description = "A SDK library for accessing big model apis from Z.ai" authors = ["Z.ai"] readme = "README.md" diff --git a/src/zai/_version.py b/src/zai/_version.py index 1718685..61516e7 100644 --- a/src/zai/_version.py +++ b/src/zai/_version.py @@ -1,2 +1,2 @@ __title__ = 'Z.ai' -__version__ = '0.0.4.3' +__version__ = '0.1.0' From dbc51c365d55d473f386bdbd08e6352931f69f05 Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Mon, 8 Dec 2025 11:08:06 +0800 Subject: [PATCH 3/4] feat: support glm-4.6v --- README.md | 6 +++--- README_CN.md | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index c313d59..eac4888 100644 --- a/README.md +++ b/README.md @@ -169,7 +169,7 @@ client = ZaiClient(api_key="your-api-key") # Create chat completion response = client.chat.completions.create( - model='glm-4', + model='glm-4.6', messages=[ {'role': 'system', 'content': 
'You are a helpful assistant.'}, {'role': 'user', 'content': 'Tell me a story about AI.'}, @@ -192,7 +192,7 @@ client = ZaiClient(api_key="your-api-key") # Create chat completion response = client.chat.completions.create( - model='glm-4', + model='glm-4.6', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'What is artificial intelligence?'}, @@ -338,7 +338,7 @@ client = ZaiClient(api_key="your-api-key") try: response = client.chat.completions.create( - model="glm-4", + model="glm-4.6", messages=[ {"role": "user", "content": "Hello, Z.ai!"} ] diff --git a/README_CN.md b/README_CN.md index 474f1bb..1347e63 100644 --- a/README_CN.md +++ b/README_CN.md @@ -107,7 +107,7 @@ client = ZhipuAiClient(api_key="your-api-key") # Create chat completion response = client.chat.completions.create( - model="glm-4", + model="glm-4.6", messages=[ {"role": "user", "content": "Hello, Z.ai!"} ] @@ -175,7 +175,7 @@ client = ZaiClient(api_key="your-api-key") # 创建对话 response = client.chat.completions.create( - model='glm-4', + model='glm-4.6', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Tell me a story about AI.'}, @@ -198,7 +198,7 @@ client = ZaiClient(api_key="your-api-key") # 创建对话 response = client.chat.completions.create( - model='glm-4', + model='glm-4.6', messages=[ {'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'What is artificial intelligence?'}, @@ -344,7 +344,7 @@ client = ZaiClient(api_key="your-api-key") # 请填写您自己的APIKey try: response = client.chat.completions.create( - model="glm-4", + model="glm-4.6", messages=[ {"role": "user", "content": "你好, Z.ai !"} ] From fbbe8888c589649d9d88db1b5e1441c5b7c42c9c Mon Sep 17 00:00:00 2001 From: tomsun28 Date: Mon, 8 Dec 2025 11:20:32 +0800 Subject: [PATCH 4/4] feat: support glm-4.6v --- src/zai/types/chat/chat_completion_chunk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/src/zai/types/chat/chat_completion_chunk.py b/src/zai/types/chat/chat_completion_chunk.py index 4c4ba43..59ff186 100644 --- a/src/zai/types/chat/chat_completion_chunk.py +++ b/src/zai/types/chat/chat_completion_chunk.py @@ -68,7 +68,7 @@ class ChoiceDelta(BaseModel): Attributes: content: Content delta role: Role of the message sender - reasoning_content: Reasoning content delta + reasoning_content: Reasoning content delta; it is recommended to include the model's reasoning_content in the next request to achieve better results in multi-turn conversations. tool_calls: List of tool call deltas audio: Audio completion chunk """