Merge pull request #250 from infosatheesh2020/main
Updated ch06 with chat completion methods for newer models
koreyspace committed Jan 17, 2024
2 parents c462a7b + 564bf35 commit 96efd58
Showing 7 changed files with 130 additions and 51 deletions.
16 changes: 9 additions & 7 deletions 06-text-generation-apps/README.md
@@ -190,12 +190,13 @@ Now that we learned how to set up and configure openai, it's time to build your

# add your completion code
prompt = "Complete the following: Once upon a time there was a"
messages = [{"role": "user", "content": prompt}]

# make completion
completion = openai.Completion.create(engine= deployment_name, model="davinci-002", prompt=prompt)
completion = client.chat.completions.create(model=deployment, messages=messages)

# print response
print(completion.choices[0].text)
print(completion.choices[0].message.content)
```
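
The snippet above assumes you've already created the `client` and `deployment` it calls. For reference, a minimal sketch of that setup, matching the environment variable names used elsewhere in this chapter (for example in `history-bot.py`), looks like this:

```python
import os
import dotenv
from openai import AzureOpenAI

# load AZURE_OPENAI_* settings from a local .env file
dotenv.load_dotenv()

# configure the Azure OpenAI service client
client = AzureOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    api_key=os.environ["AZURE_OPENAI_KEY"],
    api_version="2023-10-01-preview"
)

# the model deployment to call
deployment = os.environ["AZURE_OPENAI_DEPLOYMENT"]
```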

> [!NOTE]
@@ -511,15 +512,16 @@ To further improve it, we want to add the following:
Locate the part in the code that prints out the result from the first prompt and add the following code below:

```python
old_prompt_result = completion.choices[0].text
old_prompt_result = completion.choices[0].message.content
prompt = "Produce a shopping list for the generated recipes and please don't include ingredients that I already have."

new_prompt = f"{old_prompt_result} {prompt}"
completion = openai.Completion.create(engine=deployment_name, prompt=new_prompt, max_tokens=1200)
messages = [{"role": "user", "content": new_prompt}]
completion = client.chat.completions.create(model=deployment, messages=messages, max_tokens=1200)

# print response
print("Shopping list:")
print(completion.choices[0].text)
print(completion.choices[0].message.content)
```

Note the following:
@@ -585,15 +587,15 @@ What we have so far is code that works, but there are some tweaks we should be d
To change the number of tokens used, you can use the `max_tokens` parameter. For example, if you want to use 100 tokens, you would do:

```python
completion = openai.Completion.create(model="davinci-002", prompt=prompt, max_tokens=100)
completion = client.chat.completions.create(model=deployment, messages=messages, max_tokens=100)
```

- **Experimenting with temperature**. Temperature is something we haven't mentioned so far, but it's an important factor in how our program behaves. The higher the temperature value, the more random the output will be. Conversely, the lower the temperature value, the more predictable the output will be. Consider whether you want variation in your output or not.

To alter the temperature, you can use the `temperature` parameter. For example, if you want to use a temperature of 0.5, you would do:

```python
completion = openai.Completion.create(model="davinci-002", prompt=prompt, temperature=0.5)
completion = client.chat.completions.create(model=deployment, messages=messages, temperature=0.5)
```

> Note, the closer to 1.0, the more varied the output.
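
A minimal sketch combining both parameters in a single chat-completion call (the values shown are just placeholders to experiment with):

```python
# cap the response length and reduce randomness for more repeatable output
completion = client.chat.completions.create(
    model=deployment,
    messages=messages,
    max_tokens=100,    # upper bound on generated tokens
    temperature=0.2    # closer to 0 = more predictable output
)
print(completion.choices[0].message.content)
```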
13 changes: 7 additions & 6 deletions 06-text-generation-apps/app-recipe.py
@@ -22,22 +22,23 @@

# interpolate the number of recipes and ingredients into the prompt
prompt = f"Show me {no_recipes} recipes for a dish with the following ingredients: {ingredients}. Per recipe, list all the ingredients used, no {filter}: "
messages = [{"role": "user", "content": prompt}]


completion = client.completions.create(model=deployment, prompt=prompt, max_tokens=600, temperature = 0.1)
completion = client.chat.completions.create(model=deployment, messages=messages, max_tokens=600, temperature=0.1)


# print response
print("Recipes:")
print(completion.choices[0].text)
print(completion.choices[0].message.content)

old_prompt_result = completion.choices[0].text
old_prompt_result = completion.choices[0].message.content
prompt_shopping = "Produce a shopping list, and please don't include ingredients that I already have at home: "

new_prompt = f"Given ingredients at home {ingredients} and these generated recipes: {old_prompt_result}, {prompt_shopping}"
completion = client.completions.create(model=deployment, prompt=prompt, max_tokens=600)
messages = [{"role": "user", "content": new_prompt}]
completion = client.chat.completions.create(model=deployment, messages=messages, max_tokens=600, temperature=0)

# print response
print("\n=====Shopping list ======= \n")
print(completion.choices[0].text)
print(completion.choices[0].message.content)
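
A side note on the updated script: since the chat API takes a list of messages, the follow-up request could alternatively continue the same conversation instead of concatenating strings into a new prompt. A hedged sketch of that variant (not what the file does today):

```python
# sketch: carry the first exchange forward as chat history instead of building new_prompt
messages = [{"role": "user", "content": prompt}]
completion = client.chat.completions.create(model=deployment, messages=messages, max_tokens=600, temperature=0.1)
recipes = completion.choices[0].message.content

# append the assistant's answer and the follow-up request to the same conversation
messages.append({"role": "assistant", "content": recipes})
messages.append({"role": "user", "content": "Produce a shopping list, and please don't include ingredients that I already have at home."})

completion = client.chat.completions.create(model=deployment, messages=messages, max_tokens=600, temperature=0)
print(completion.choices[0].message.content)
```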

6 changes: 3 additions & 3 deletions 06-text-generation-apps/app.py
@@ -16,12 +16,12 @@

# add your completion code
prompt = "Complete the following: Once upon a time there was a"

messages = [{"role": "user", "content": prompt}]
# make completion
completion = client.completions.create(model=deployment, prompt=prompt)
completion = client.chat.completions.create(model=deployment, messages=messages)

# print response
print(completion.choices[0].text)
print(completion.choices[0].message.content)

# very unhappy _____.

36 changes: 36 additions & 0 deletions 06-text-generation-apps/history-bot.py
@@ -0,0 +1,36 @@
from openai import AzureOpenAI
import os
import dotenv

# import dotenv
dotenv.load_dotenv()

# configure Azure OpenAI service client
client = AzureOpenAI(
azure_endpoint = os.environ["AZURE_OPENAI_ENDPOINT"],
api_key=os.environ['AZURE_OPENAI_KEY'],
api_version = "2023-10-01-preview"
)

deployment=os.environ['AZURE_OPENAI_DEPLOYMENT']

# add your completion code
persona = input("Tell me which historical character you want me to be: ")
question = input("Ask your question about the historical character: ")
prompt = f"""
You are going to play the historical character {persona}.
Whenever a question is asked, recall facts about the relevant timelines and incidents and respond with accurate answers only. Don't make up content yourself. If you don't know something, say that you don't remember.
Provide an answer to the question: {question}
"""
messages = [{"role": "user", "content": prompt}]
# make completion
completion = client.chat.completions.create(model=deployment, messages=messages, temperature=0)

# print response
print(completion.choices[0].message.content)

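A possible refinement for `history-bot.py` (a sketch, not part of this change): the persona instructions can be sent as a `system` message, keeping the user's question as a separate `user` message, which the chat completions API supports:

```python
# sketch: separate the persona instructions (system role) from the question (user role)
messages = [
    {"role": "system", "content": f"You are going to play the historical character {persona}. "
                                   "Respond with accurate facts only; if you don't know something, say you don't remember."},
    {"role": "user", "content": question},
]
completion = client.chat.completions.create(model=deployment, messages=messages, temperature=0)
print(completion.choices[0].message.content)
```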
35 changes: 20 additions & 15 deletions 06-text-generation-apps/notebook-azure-openai.ipynb
@@ -88,7 +88,7 @@
"pip install openai\n",
"```\n",
"\n",
"If you aren't running this notebook in a Codespaces or a Dev Container, you also need to install [Python](https://www.python.org/) on your machine.",
"If you aren't running this notebook in a Codespaces or a Dev Container, you also need to install [Python](https://www.python.org/) on your machine.\n",
"\n",
"### Create a resource\n",
"\n",
@@ -141,13 +141,13 @@
"\n",
"## Generate text\n",
"\n",
"The way to generate text is to use the `Completion` class. Here's an example:\n",
"The way to generate text is to use the `chat.completion` class. Here's an example:\n",
"\n",
"```python\n",
"prompt = \"Complete the following: Once upon a time there was a\"\n",
"\n",
"completion = client.completions.create(model=deployment, prompt=prompt)\n",
"print(completion.choices[0].text)\n",
"completion = client.chat.completions.create(model=deployment, messages=[{\"role\": \"user\", \"content\": prompt}])\n",
"print(completion.choices[0].message.content)\n",
"```\n",
"\n",
"In the above code, we create a completion object and pass in the model we want to use and the prompt. Then we print the generated text.\n",
@@ -240,12 +240,13 @@
"\n",
"# add your completion code\n",
"prompt = \"Complete the following: Once upon a time there was a\"\n",
"messages = [{\"role\": \"user\", \"content\": prompt}] \n",
"\n",
"# make completion\n",
"completion = client.completions.create(model=deployment, prompt=prompt)\n",
"completion = client.chat.completions.create(model=deployment, messages=messages)\n",
"\n",
"# print response\n",
"print(completion.choices[0].text)"
"print(completion.choices[0].message.content)"
]
},
{
@@ -457,12 +458,13 @@
"deployment=os.environ['AZURE_OPENAI_DEPLOYMENT']\n",
"\n",
"prompt = \"Show me 5 recipes for a dish with the following ingredients: chicken, potatoes, and carrots. Per recipe, list all the ingredients used\"\n",
"messages = [{\"role\": \"user\", \"content\": prompt}] \n",
"\n",
"# make completion\n",
"completion = client.completions.create(model=deployment, prompt=prompt, max_tokens=600)\n",
"completion = client.chat.completions.create(model=deployment, messages=messages, max_tokens=600)\n",
"\n",
"# print response\n",
"print(completion.choices[0].text)"
"print(completion.choices[0].message.content)"
]
},
{
@@ -516,12 +518,13 @@
"\n",
"# interpolate the number of recipes into the prompt an ingredients\n",
"prompt = f\"Show me {no_recipes} recipes for a dish with the following ingredients: {ingredients}. Per recipe, list all the ingredients used\"\n",
"messages = [{\"role\": \"user\", \"content\": prompt}] \n",
"\n",
"# make completion\n",
"completion = client.completions.create(model=deployment, prompt=prompt, max_tokens=600)\n",
"completion = client.chat.completions.create(model=deployment, messages=messages, max_tokens=600)\n",
"\n",
"# print response\n",
"print(completion.choices[0].text)"
"print(completion.choices[0].message.content)"
]
},
{
@@ -638,11 +641,12 @@
" prompt = \"Produce a shopping list for the generated recipes and please don't include ingredients that I already have.\"\n",
" \n",
" new_prompt = f\"{old_prompt_result} {prompt}\"\n",
" completion = client.completion.create(model=deployment, prompt=new_prompt, max_tokens=1200)\n",
" messages = [{\"role\": \"user\", \"content\": new_prompt}]\n",
" completion = client.chat.completion.create(model=deployment, messages=messages, max_tokens=1200)\n",
" \n",
" # print response\n",
" print(\"Shopping list:\")\n",
" print(completion.choices[0].text)\n",
" print(completion.choices[0].message.content)\n",
" ```\n",
"\n",
" Note the following:\n",
@@ -651,12 +655,13 @@
" \n",
" ```python\n",
" new_prompt = f\"{old_prompt_result} {prompt}\"\n",
" messages = [{\"role\": \"user\", \"content\": new_prompt}]\n",
" ```\n",
"\n",
" - We make a new request, but also considering the number of tokens we asked for in the first prompt, so this time we say `max_tokens` is 1200. \n",
"\n",
" ```python\n",
" completion = client.completion.create(model=deployment, prompt=new_prompt, max_tokens=1200)\n",
" completion = client.chat.completion.create(model=deployment, messages=messages, max_tokens=1200)\n",
" ``` \n",
"\n",
" Taking this code for a spin, we now arrive at the following output:\n",
Expand All @@ -679,15 +684,15 @@
" To change tokens used, you can use the `max_tokens` parameter. For example, if you want to use 100 tokens, you would do:\n",
"\n",
" ```python\n",
" completion = client.completions.create(model=deployment, prompt=prompt, max_tokens=100)\n",
" completion = client.chat.completion.create(model=deployment, messages=messages, max_tokens=100)\n",
" ```\n",
"\n",
"- **Experimenting with temperature**. Temperature is something we haven't mentioned so far but is an important context for how our program performs. The higher the temperature value the more random the output will be. Conversely the lower the temperature value the more predictable the output will be. Consider whether you want variation in your output or not.\n",
"\n",
" To alter the temperature, you can use the `temperature` parameter. For example, if you want to use a temperature of 0.5, you would do:\n",
"\n",
" ```python\n",
" completion = client.completions.create(model=deployment, prompt=prompt, temperature=0.5)\n",
" completion = client.chat.completion.create(model=deployment, messages=messages, temperature=0.5)\n",
" ```\n",
"\n",
" > Note, the closer to 1.0, the more varied the output.\n",
39 changes: 39 additions & 0 deletions 06-text-generation-apps/study-buddy.py
@@ -0,0 +1,39 @@
from openai import AzureOpenAI
import os
import dotenv

# import dotenv
dotenv.load_dotenv()

# configure Azure OpenAI service client
client = AzureOpenAI(
azure_endpoint = os.environ["AZURE_OPENAI_ENDPOINT"],
api_key=os.environ['AZURE_OPENAI_KEY'],
api_version = "2023-10-01-preview"
)

deployment=os.environ['AZURE_OPENAI_DEPLOYMENT']

# add your completion code
question = input("Ask your study buddy a question about the Python language: ")
prompt = f"""
You are an expert on the Python language.
Whenever a question is asked, provide your response in the format below.
- Concept
- Example code showing how the concept is implemented
- Explanation of the example and how the concept works, to help the user understand it better.
Provide an answer to the question: {question}
"""
messages = [{"role": "user", "content": prompt}]
# make completion
completion = client.chat.completions.create(model=deployment, messages=messages)

# print response
print(completion.choices[0].message.content)

36 changes: 16 additions & 20 deletions 08-building-search-applications/notebook-azure-openai-simple.ipynb
@@ -16,21 +16,16 @@
"outputs": [],
"source": [
"import os\n",
"import openai\n",
"from openai import AzureOpenAI\n",
"import numpy as np\n",
"from dotenv import load_dotenv\n",
"load_dotenv()\n",
"\n",
"openai.api_type = \"azure\"\n",
"openai.api_version = os.getenv(\"AZURE_OPENAI_API_VERSION\",\"\").strip()\n",
"\n",
"API_KEY = os.getenv(\"AZURE_OPENAI_API_KEY\",\"\").strip()\n",
"assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n",
"openai.api_key = API_KEY\n",
"\n",
"RESOURCE_ENDPOINT = os.getenv(\"OPENAI_API_BASE\",\"\").strip()\n",
"assert RESOURCE_ENDPOINT, \"ERROR: Azure OpenAI Endpoint is missing\"\n",
"assert \"openai.azure.com\" in RESOURCE_ENDPOINT.lower(), \"ERROR: Azure OpenAI Endpoint should be in the form: \\n\\n\\t<your unique endpoint identifier>.openai.azure.com\"\n",
"openai.api_base = RESOURCE_ENDPOINT"
"client = AzureOpenAI(\n",
" api_key = os.getenv(\"AZURE_OPENAI_KEY\"), \n",
" api_version = \"2023-05-15\",\n",
" azure_endpoint = os.getenv(\"AZURE_OPENAI_ENDPOINT\")\n",
")"
]
},
{
@@ -49,7 +44,8 @@
"metadata": {},
"outputs": [],
"source": [
"from openai.embeddings_utils import cosine_similarity"
"def cosine_similarity(a, b):\n",
" return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))"
]
},
{
@@ -60,8 +56,8 @@
"source": [
"text = 'the quick brown fox jumped over the lazy dog'\n",
"model = 'text-embedding-ada-002'\n",
"openai.Embedding()\\\n",
" .create(input=[text], engine='text-embedding-ada-002')[\"data\"][0][\"embedding\"]"
"\n",
"client.embeddings.create(input = [text], model=model).data[0].embedding"
]
},
{
@@ -71,10 +67,10 @@
"outputs": [],
"source": [
"# compare several words\n",
"automobile_embedding = openai.Embedding.create(input='automobile', engine=model)[\"data\"][0][\"embedding\"]\n",
"vehicle_embedding = openai.Embedding.create(input='vehicle', engine=model)[\"data\"][0][\"embedding\"]\n",
"dinosaur_embedding = openai.Embedding.create(input='dinosaur', engine=model)[\"data\"][0][\"embedding\"]\n",
"stick_embedding = openai.Embedding.create(input='stick', engine=model)[\"data\"][0][\"embedding\"]\n",
"automobile_embedding = client.embeddings.create(input = 'automobile', model=model).data[0].embedding\n",
"vehicle_embedding = client.embeddings.create(input = 'vehicle', model=model).data[0].embedding\n",
"dinosaur_embedding = client.embeddings.create(input = 'dinosaur', model=model).data[0].embedding\n",
"stick_embedding = client.embeddings.create(input = 'stick', model=model).data[0].embedding\n",
"\n",
"# comparing cosine similarity, automobiles vs automobiles should be 1.0, i.e exactly the same, while automobiles vs dinosaurs should be between 0 and 1, i.e. not the same\n",
"print(cosine_similarity(automobile_embedding, automobile_embedding))\n",
@@ -100,7 +96,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
"version": "3.10.13"
},
"orig_nbformat": 4
},
