2 changes: 1 addition & 1 deletion pydantic_ai_slim/pydantic_ai/models/openai.py
@@ -279,7 +279,7 @@ async def _completions_create(
             tool_choice=tool_choice or NOT_GIVEN,
             stream=stream,
             stream_options={'include_usage': True} if stream else NOT_GIVEN,
-            max_tokens=model_settings.get('max_tokens', NOT_GIVEN),
+            max_completion_tokens=model_settings.get('max_tokens', NOT_GIVEN),
             temperature=model_settings.get('temperature', NOT_GIVEN),
             top_p=model_settings.get('top_p', NOT_GIVEN),
             timeout=model_settings.get('timeout', NOT_GIVEN),
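OpenAI's Chat Completions API now expects `max_completion_tokens` instead of the deprecated `max_tokens` parameter (reasoning models such as `o3-mini` reject the old name), so the `max_tokens` model setting is forwarded under the new name. A minimal sketch of how this surfaces to users of Pydantic AI, assuming a placeholder API key:

    from pydantic_ai import Agent
    from pydantic_ai.models.openai import OpenAIModel
    from pydantic_ai.providers.openai import OpenAIProvider
    from pydantic_ai.settings import ModelSettings

    # The user-facing setting is still `max_tokens`; with this change it is sent
    # to the OpenAI client as `max_completion_tokens`.
    model = OpenAIModel('o3-mini', provider=OpenAIProvider(api_key='...'))  # placeholder key
    agent = Agent(model, model_settings=ModelSettings(max_tokens=100))

    result = agent.run_sync('hello')
    print(result.data)

The three YAML documents below are the VCR cassettes recorded for the new `test_max_completion_tokens` test, one per parametrized model.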
@@ -0,0 +1,78 @@
interactions:
- request:
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '123'
      content-type:
      - application/json
      host:
      - api.openai.com
    method: POST
    parsed_body:
      max_completion_tokens: 100
      messages:
      - content: hello
        role: user
      model: gpt-4.5-preview
      n: 1
      stream: false
    uri: https://api.openai.com/v1/chat/completions
  response:
    headers:
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      connection:
      - keep-alive
      content-length:
      - '807'
      content-type:
      - application/json
      openai-organization:
      - pydantic-28gund
      openai-processing-ms:
      - '1408'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      transfer-encoding:
      - chunked
    parsed_body:
      choices:
      - finish_reason: stop
        index: 0
        message:
          annotations: []
          content: Hello! How can I help you today?
          refusal: null
          role: assistant
      created: 1742636225
      id: chatcmpl-BDpZplWguNLn40wA5mIpCR3OIzvYP
      model: gpt-4.5-preview-2025-02-27
      object: chat.completion
      service_tier: default
      system_fingerprint: null
      usage:
        completion_tokens: 10
        completion_tokens_details:
          accepted_prediction_tokens: 0
          audio_tokens: 0
          reasoning_tokens: 0
          rejected_prediction_tokens: 0
        prompt_tokens: 8
        prompt_tokens_details:
          audio_tokens: 0
          cached_tokens: 0
        total_tokens: 18
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,79 @@
interactions:
- request:
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '119'
      content-type:
      - application/json
      host:
      - api.openai.com
    method: POST
    parsed_body:
      max_completion_tokens: 100
      messages:
      - content: hello
        role: user
      model: gpt-4o-mini
      n: 1
      stream: false
    uri: https://api.openai.com/v1/chat/completions
  response:
    headers:
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      connection:
      - keep-alive
      content-length:
      - '840'
      content-type:
      - application/json
      openai-organization:
      - pydantic-28gund
      openai-processing-ms:
      - '278'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      transfer-encoding:
      - chunked
    parsed_body:
      choices:
      - finish_reason: stop
        index: 0
        logprobs: null
        message:
          annotations: []
          content: Hello! How can I assist you today?
          refusal: null
          role: assistant
      created: 1742636224
      id: chatcmpl-BDpZoxw4i90ZesN8iyrwLmGWRJ5lz
      model: gpt-4o-mini-2024-07-18
      object: chat.completion
      service_tier: default
      system_fingerprint: fp_b8bc95a0ac
      usage:
        completion_tokens: 10
        completion_tokens_details:
          accepted_prediction_tokens: 0
          audio_tokens: 0
          reasoning_tokens: 0
          rejected_prediction_tokens: 0
        prompt_tokens: 8
        prompt_tokens_details:
          audio_tokens: 0
          cached_tokens: 0
        total_tokens: 18
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,78 @@
interactions:
- request:
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '115'
      content-type:
      - application/json
      host:
      - api.openai.com
    method: POST
    parsed_body:
      max_completion_tokens: 100
      messages:
      - content: hello
        role: user
      model: o3-mini
      n: 1
      stream: false
    uri: https://api.openai.com/v1/chat/completions
  response:
    headers:
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      connection:
      - keep-alive
      content-length:
      - '817'
      content-type:
      - application/json
      openai-organization:
      - pydantic-28gund
      openai-processing-ms:
      - '1895'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      transfer-encoding:
      - chunked
    parsed_body:
      choices:
      - finish_reason: stop
        index: 0
        message:
          annotations: []
          content: Hello there! How can I help you today?
          refusal: null
          role: assistant
      created: 1742636222
      id: chatcmpl-BDpZm1SiItIXIcDA0xRV9QGZhD97e
      model: o3-mini-2025-01-31
      object: chat.completion
      service_tier: default
      system_fingerprint: fp_617f206dd9
      usage:
        completion_tokens: 85
        completion_tokens_details:
          accepted_prediction_tokens: 0
          audio_tokens: 0
          reasoning_tokens: 64
          rejected_prediction_tokens: 0
        prompt_tokens: 7
        prompt_tokens_details:
          audio_tokens: 0
          cached_tokens: 0
        total_tokens: 92
    status:
      code: 200
      message: OK
version: 1
12 changes: 11 additions & 1 deletion tests/models/test_openai.py
@@ -28,7 +28,7 @@
 from pydantic_ai.result import Usage
 from pydantic_ai.settings import ModelSettings
 
-from ..conftest import IsNow, TestEnv, raise_if_exception, try_import
+from ..conftest import IsNow, IsStr, TestEnv, raise_if_exception, try_import
 from .mock_async_stream import MockAsyncStream
 
 with try_import() as imports_successful:
@@ -684,3 +684,13 @@ def test_model_status_error(allow_model_requests: None) -> None:
     with pytest.raises(ModelHTTPError) as exc_info:
         agent.run_sync('hello')
     assert str(exc_info.value) == snapshot("status_code: 500, model_name: gpt-4o, body: {'error': 'test error'}")
+
+
+@pytest.mark.vcr()
+@pytest.mark.parametrize('model_name', ['o3-mini', 'gpt-4o-mini', 'gpt-4.5-preview'])
+async def test_max_completion_tokens(allow_model_requests: None, model_name: str, openai_api_key: str):
+    m = OpenAIModel(model_name, provider=OpenAIProvider(api_key=openai_api_key))
+    agent = Agent(m, model_settings=ModelSettings(max_tokens=100))
+
+    result = await agent.run('hello')
+    assert result.data == IsStr()
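
As an illustrative extension (not part of this PR), the recorded usage could also be asserted against the cap: `Usage.response_tokens` corresponds to `completion_tokens` in the cassettes above, which stays under 100 even for `o3-mini`, whose 85 completion tokens include 64 reasoning tokens. A hedged sketch, reusing the same fixtures and assuming fresh cassettes would be recorded for the new test name:

    @pytest.mark.vcr()
    @pytest.mark.parametrize('model_name', ['o3-mini', 'gpt-4o-mini', 'gpt-4.5-preview'])
    async def test_max_completion_tokens_usage(allow_model_requests: None, model_name: str, openai_api_key: str):
        # Same setup as test_max_completion_tokens, plus a check that the recorded
        # completion stays within the requested cap.
        m = OpenAIModel(model_name, provider=OpenAIProvider(api_key=openai_api_key))
        agent = Agent(m, model_settings=ModelSettings(max_tokens=100))

        result = await agent.run('hello')
        assert result.data == IsStr()
        assert result.usage().response_tokens is not None
        assert result.usage().response_tokens <= 100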