From 520383961b6231dc634a167404d20c64af79ac59 Mon Sep 17 00:00:00 2001
From: Jason Liu
Date: Mon, 5 Feb 2024 14:19:58 -0500
Subject: [PATCH] bump docs

---
 README.md                      | 39 +++++++++++++++++++++++++++++-----
 tests/openai/docs/test_docs.py | 15 ++++++++-----
 2 files changed, 44 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index 95a5dd1d2..84516df15 100644
--- a/README.md
+++ b/README.md
@@ -79,10 +79,11 @@ For async clients you must use `apatch` vs. `patch`, as shown:
 
 ```py
 import instructor
-from openai import AsyncOpenAI
+import asyncio
+import openai
 from pydantic import BaseModel
 
-aclient = instructor.apatch(AsyncOpenAI())
+aclient = instructor.apatch(openai.AsyncOpenAI())
 
 
 class UserExtract(BaseModel):
@@ -90,7 +91,7 @@ class UserExtract(BaseModel):
     age: int
 
 
-model = await aclient.chat.completions.create(
+task = aclient.chat.completions.create(
     model="gpt-3.5-turbo",
     response_model=UserExtract,
     messages=[
@@ -98,7 +99,15 @@ model = await aclient.chat.completions.create(
     ],
 )
 
-assert isinstance(model, UserExtract)
+
+response = asyncio.run(task)
+print(response.model_dump_json(indent=2))
+"""
+{
+  "name": "Jason",
+  "age": 25
+}
+"""
 ```
 
 ### Step 1: Patch the client
@@ -132,7 +141,19 @@ class UserDetail(BaseModel):
 Use the `client.chat.completions.create` method to send a prompt and extract the data into the Pydantic object. The `response_model` parameter specifies the Pydantic model to use for extraction. It is helpful to annotate the variable with the type of the response model which will help your IDE provide autocomplete and spell check.
 
 ```python
-user: UserDetail = client.chat.completions.create(
+import instructor
+import openai
+from pydantic import BaseModel
+
+client = instructor.patch(openai.OpenAI())
+
+
+class UserDetail(BaseModel):
+    name: str
+    age: int
+
+
+user = client.chat.completions.create(
     model="gpt-3.5-turbo",
     response_model=UserDetail,
     messages=[
@@ -140,8 +161,16 @@ user: UserDetail = client.chat.completions.create(
     ],
 )
 
+assert isinstance(user, UserDetail)
 assert user.name == "Jason"
 assert user.age == 25
+print(user.model_dump_json(indent=2))
+"""
+{
+  "name": "Jason",
+  "age": 25
+}
+"""
 ```
 
 ## Pydantic Validation
diff --git a/tests/openai/docs/test_docs.py b/tests/openai/docs/test_docs.py
index a8eb1f2e8..4f46ab586 100644
--- a/tests/openai/docs/test_docs.py
+++ b/tests/openai/docs/test_docs.py
@@ -5,7 +5,12 @@
 
 @pytest.mark.parametrize("example", find_examples("README.md"), ids=str)
 def test_readme(example: CodeExample, eval_example: EvalExample):
-    eval_example.format(example)
+    if eval_example.update_examples:
+        eval_example.format(example)
+        eval_example.run_print_update(example)
+    else:
+        eval_example.lint(example)
+        eval_example.run(example)
 
 
 @pytest.mark.parametrize("example", find_examples("docs/index.md"), ids=str)
@@ -15,7 +20,7 @@ def test_index(example: CodeExample, eval_example: EvalExample):
         eval_example.run_print_update(example)
     else:
         eval_example.lint(example)
-        eval_example.run_print_check(example)
+        eval_example.run(example)
 
 
 @pytest.mark.skip("This is a test for the blog post, which is often broken up")
@@ -26,7 +31,7 @@ def test_format_blog(example: CodeExample, eval_example: EvalExample):
         eval_example.run_print_update(example)
     else:
         eval_example.lint(example)
-        eval_example.run_print_check(example)
+        eval_example.run(example)
 
 
 @pytest.mark.parametrize("example", find_examples("docs/concepts"), ids=str)
@@ -36,7 +41,7 @@ def test_format_concepts(example: CodeExample, eval_example: EvalExample):
         eval_example.run_print_update(example)
     else:
         eval_example.lint(example)
-        eval_example.run_print_check(example)
+        eval_example.run(example)
 
 
 @pytest.mark.skip(
@@ -49,4 +54,4 @@ def test_format_examples(example: CodeExample, eval_example: EvalExample):
         eval_example.run_print_update(example)
     else:
         eval_example.lint(example)
-        eval_example.run_print_check(example)
+        eval_example.run(example)
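For readers following the test changes above: the `update_examples` attribute checked in each test comes from the pytest-examples package (the same package that provides `find_examples`, `CodeExample`, and the `eval_example` fixture), and it is typically enabled by running pytest with the `--update-examples` flag. The sketch below is not part of the patch; it assumes pytest-examples is installed and that `README.md` is reachable from the working directory, and it simply factors the repeated if/else from the patched tests into one helper.

```python
# Sketch of the doc-testing pattern used in the patched test_docs.py.
# Assumes the pytest-examples package is installed; its --update-examples
# CLI flag is what normally sets `eval_example.update_examples`.
import pytest
from pytest_examples import CodeExample, EvalExample, find_examples


def check_example(example: CodeExample, eval_example: EvalExample) -> None:
    if eval_example.update_examples:
        # Maintenance mode: reformat the fenced block and rewrite the
        # printed output captured in the docs.
        eval_example.format(example)
        eval_example.run_print_update(example)
    else:
        # CI mode: lint the snippet and execute it without touching the file.
        eval_example.lint(example)
        eval_example.run(example)


@pytest.mark.parametrize("example", find_examples("README.md"), ids=str)
def test_readme(example: CodeExample, eval_example: EvalExample):
    check_example(example, eval_example)
```

With that layout, `pytest --update-examples` rewrites the documented output in place, while a plain `pytest` run lints and executes each snippet unchanged.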