
Commit

bump docs
jxnl committed Feb 5, 2024
1 parent f8af46e commit 5203839
Showing 2 changed files with 44 additions and 10 deletions.
39 changes: 34 additions & 5 deletions README.md
@@ -79,26 +79,35 @@ For async clients you must use `apatch` vs. `patch`, as shown:

```py
import instructor
-from openai import AsyncOpenAI
+import asyncio
+import openai
from pydantic import BaseModel

-aclient = instructor.apatch(AsyncOpenAI())
+aclient = instructor.apatch(openai.AsyncOpenAI())


class UserExtract(BaseModel):
    name: str
    age: int


-model = await aclient.chat.completions.create(
+task = aclient.chat.completions.create(
    model="gpt-3.5-turbo",
    response_model=UserExtract,
    messages=[
        {"role": "user", "content": "Extract jason is 25 years old"},
    ],
)

-assert isinstance(model, UserExtract)
+
+response = asyncio.run(task)
+print(response.model_dump_json(indent=2))
+"""
+{
+  "name": "Jason",
+  "age": 25
+}
+"""
```
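
For reference (not part of this commit), the same patched async client can also be awaited inside an `async def` rather than handed to `asyncio.run()` directly; a minimal sketch, assuming the same `UserExtract` model and an `OPENAI_API_KEY` in the environment:

```py
import asyncio

import instructor
import openai
from pydantic import BaseModel

# Patch the async client so it accepts response_model, as in the README above.
aclient = instructor.apatch(openai.AsyncOpenAI())


class UserExtract(BaseModel):
    name: str
    age: int


async def extract_user() -> UserExtract:
    # Inside an event loop the patched call is awaited like any other coroutine.
    return await aclient.chat.completions.create(
        model="gpt-3.5-turbo",
        response_model=UserExtract,
        messages=[
            {"role": "user", "content": "Extract jason is 25 years old"},
        ],
    )


user = asyncio.run(extract_user())
assert isinstance(user, UserExtract)
```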

### Step 1: Patch the client
@@ -132,16 +141,36 @@ class UserDetail(BaseModel):
Use the `client.chat.completions.create` method to send a prompt and extract the data into the Pydantic object. The `response_model` parameter specifies the Pydantic model to use for extraction. It is helpful to annotate the variable with the type of the response model, which helps your IDE provide autocomplete and spell checking.

```python
-user: UserDetail = client.chat.completions.create(
+import instructor
+import openai
+from pydantic import BaseModel
+
+client = instructor.patch(openai.OpenAI())
+
+
+class UserDetail(BaseModel):
+    name: str
+    age: int
+
+
+user = client.chat.completions.create(
    model="gpt-3.5-turbo",
    response_model=UserDetail,
    messages=[
        {"role": "user", "content": "Extract Jason is 25 years old"},
    ],
)

assert isinstance(user, UserDetail)
assert user.name == "Jason"
assert user.age == 25
+
+print(user.model_dump_json(indent=2))
+"""
+{
+  "name": "Jason",
+  "age": 25
+}
+"""
```
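
For completeness (not part of this commit), the variable annotation recommended in the text above still works with the patched client; a minimal self-contained sketch, assuming an `OPENAI_API_KEY` in the environment:

```python
import instructor
import openai
from pydantic import BaseModel

client = instructor.patch(openai.OpenAI())


class UserDetail(BaseModel):
    name: str
    age: int


# Annotating the variable with the response model type gives the IDE
# autocomplete and spell checking on the extracted fields.
user: UserDetail = client.chat.completions.create(
    model="gpt-3.5-turbo",
    response_model=UserDetail,
    messages=[
        {"role": "user", "content": "Extract Jason is 25 years old"},
    ],
)
print(user.name, user.age)
```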

## Pydantic Validation
15 changes: 10 additions & 5 deletions tests/openai/docs/test_docs.py
@@ -5,7 +5,12 @@

@pytest.mark.parametrize("example", find_examples("README.md"), ids=str)
def test_readme(example: CodeExample, eval_example: EvalExample):
-    eval_example.format(example)
+    if eval_example.update_examples:
+        eval_example.format(example)
+        eval_example.run_print_update(example)
+    else:
+        eval_example.lint(example)
+        eval_example.run(example)


@pytest.mark.parametrize("example", find_examples("docs/index.md"), ids=str)
@@ -15,7 +20,7 @@ def test_index(example: CodeExample, eval_example: EvalExample):
        eval_example.run_print_update(example)
    else:
        eval_example.lint(example)
-        eval_example.run_print_check(example)
+        eval_example.run(example)


@pytest.mark.skip("This is a test for the blog post, which is often broken up")
@@ -26,7 +31,7 @@ def test_format_blog(example: CodeExample, eval_example: EvalExample):
        eval_example.run_print_update(example)
    else:
        eval_example.lint(example)
-        eval_example.run_print_check(example)
+        eval_example.run(example)


@pytest.mark.parametrize("example", find_examples("docs/concepts"), ids=str)
@@ -36,7 +41,7 @@ def test_format_concepts(example: CodeExample, eval_example: EvalExample):
        eval_example.run_print_update(example)
    else:
        eval_example.lint(example)
-        eval_example.run_print_check(example)
+        eval_example.run(example)


@pytest.mark.skip(
@@ -49,4 +54,4 @@ def test_format_examples(example: CodeExample, eval_example: EvalExample):
        eval_example.run_print_update(example)
    else:
        eval_example.lint(example)
-        eval_example.run_print_check(example)
+        eval_example.run(example)
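
All of the tests above now share the same branch on `eval_example.update_examples`; a minimal sketch of that pattern (not part of this commit), assuming pytest-examples exposes `find_examples`, `CodeExample`, and `EvalExample` as imported at the top of this file:

```python
import pytest
from pytest_examples import CodeExample, EvalExample, find_examples


@pytest.mark.parametrize("example", find_examples("docs/index.md"), ids=str)
def test_docs_example(example: CodeExample, eval_example: EvalExample):
    if eval_example.update_examples:
        # With --update-examples, reformat the snippet and rewrite its printed output.
        eval_example.format(example)
        eval_example.run_print_update(example)
    else:
        # Otherwise lint the snippet and run it as written.
        eval_example.lint(example)
        eval_example.run(example)
```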
