Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,250 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
"metadata": {},
"source": [
"# AI Client Template\n",
"This is a generic template for calling an AI of your choice, selected in .env's API_NAME\n",
"\n",
"# Connecting to AI\n",
"\n",
"The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI. \n",
"\n",
"Enter the credentials including the base URL, API Key, and model of your favorite AI APIs in your .env file\n",
"\n",
"For example,\n",
"\n",
"```\n",
"OPENAI_API_KEY=<your OPENAI api key>\n",
"OPENAI_BASE_URL=https://api.openai.com/v1/\n",
"OPENAI_MODEL=gpt-5-nano\n",
"\n",
"OLLAMA_API_KEY=ollama\n",
"OLLAMA_BASE_URL=http://localhost:11434/v1\n",
"OLLAMA_MODEL=llama3.2\n",
"\n",
"GEMINI_API_KEY=<your GEMINI api key>\n",
"GEMINI_MODEL=gpt-5-nano\n",
"GEMINI_BASE_URL=\n",
"\n",
"DEFAULT_API=OPENAI\n",
"```\n",
"\n",
"If you'd like to use free Ollama, select API_NAME=OLLAMA\n",
"If you'd like to use OpenAI, select API_NAME=OPENAI\n",
"\n",
"## troubleshooting\n",
"\n",
"Please see the [troubleshooting](../setup/troubleshooting.ipynb) notebook in the setup folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"\n",
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Using OPENAI\n",
"API key found!\n",
"Model: gpt-5-nano\n",
"Base URL: https://api.openai.com/v1/\n"
]
}
],
"source": [
"# Load environment variables in a file called .env and initialize api_client\n",
"\n",
"load_dotenv(override=True)\n",
"api_name = os.getenv('DEFAULT_API')\n",
"api_key = os.getenv(api_name + '_API_KEY')\n",
"model = os.getenv(api_name + '_MODEL')\n",
"base_url=os.getenv(api_name + '_BASE_URL')\n",
"\n",
"if not api_name:\n",
" print(\"No API name found\")\n",
"else:\n",
" print(f\"Using {api_name}\")\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(f\"API key found!\")\n",
"\n",
"if not model:\n",
" print(\"No model was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An model was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(f\"Model: {model}\")\n",
"\n",
"if not base_url:\n",
" print(\"No base url found\")\n",
"else:\n",
" print(f\"Base URL: {base_url}\")\n",
"\n",
"from openai import OpenAI\n",
"\n",
"\n",
"api_client = OpenAI(base_url=base_url, api_key=api_key)\n"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "80ce2d13",
"metadata": {},
"outputs": [],
"source": [
"# Helper functions\n",
"\n",
"def messages(system_prompt, user_prompt):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]\n",
"\n",
"def get_response(system_prompt, user_prompt):\n",
" response = api_client.chat.completions.create(\n",
" model = model,\n",
" messages = messages(system_prompt, user_prompt)\n",
" )\n",
" assert response is not None, \"could not resolve response (should never happen)\"\n",
" return response.choices[0].message.content\n",
"\n",
"\n",
"def display_response(system_prompt, user_prompt):\n",
" display(Markdown(get_response(system_prompt, user_prompt)))"
]
},
{
"cell_type": "markdown",
"id": "5fa7e13b",
"metadata": {},
"source": [
"# Define system and user prompts\n",
"\n",
"## Types of prompts\n",
"\n",
"You may know this already - but if not, you will get very familiar with it!\n",
"\n",
"Models like GPT have been trained to receive instructions in a particular way.\n",
"\n",
"They expect to receive:\n",
"\n",
"**A system prompt** that tells them what task they are performing and what tone they should use\n",
"\n",
"**A user prompt** -- the conversation starter that they should reply to"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
"metadata": {},
"outputs": [],
"source": [
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n",
"\n",
"system_prompt = \"\"\"\n",
"You are a snarky AI assistant that always provides short, snarky, humorous responses.\n",
"Respond in markdown. Do not wrap the markdown in a code block - respond just with the markdown.\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
"metadata": {},
"outputs": [],
"source": [
"# Define our user prompt\n",
"\n",
"user_prompt = \"\"\"\n",
"Please give me a fun fact about space.\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
"metadata": {},
"outputs": [],
"source": [
"# A function to display this nicely in the output, using markdown\n"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"Fun fact: Space isn’t empty—it's a near-perfect vacuum full of particles and radiation. There are more stars in the observable universe than grains of sand on Earth."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_response(user_prompt=user_prompt, system_prompt=system_prompt)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "llm-engineering",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
"""
AI Client Template
This is a generic template for calling an AI of your choice, selected in .env's DEFAULT_API

# Connecting to AI

The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.

Enter the credentials including the base URL, API Key, and model of your favorite AI APIs in your .env file

For example,

```
OPENAI_API_KEY=<your OPENAI api key>
OPENAI_MODEL=gpt-5-nano
OPENAI_BASE_URL=

GEMINI_API_KEY=<your GEMINI api key>
GEMINI_MODEL=gemini-2.0-flash
GEMINI_BASE_URL=

OLLAMA_API_KEY=ollama
OLLAMA_BASE_URL=http://localhost:11434/v1
OLLAMA_MODEL=llama3.2

DEFAULT_API=OLLAMA
```

If you'd like to use free Ollama, select DEFAULT_API=OLLAMA
If you'd like to use OpenAI, select DEFAULT_API=OPENAI

## troubleshooting

Please see the [troubleshooting](../setup/troubleshooting.ipynb) notebook in the setup folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.
"""

# imports

import os
from urllib import response
from dotenv import load_dotenv
from IPython.display import Markdown, display
from openai import OpenAI

# If you get an error running this cell, then please head over to the troubleshooting notebook!

# Load environment variables from a file called .env and initialize api_client.
# The provider is selected by DEFAULT_API (e.g. OPENAI, OLLAMA, GEMINI); the
# matching <NAME>_API_KEY / <NAME>_MODEL / <NAME>_BASE_URL entries are then read.

load_dotenv(override=True)

default_api = os.getenv('DEFAULT_API')

if default_api:
    print(f"Using {default_api}")
    default_api_key = os.getenv(default_api + '_API_KEY')
    default_model = os.getenv(default_api + '_MODEL')
    default_base_uri = os.getenv(default_api + '_BASE_URL')
else:
    # Without DEFAULT_API we cannot build the env-var names; fall back to None
    # so the checks below print helpful guidance instead of raising TypeError
    # on `None + '_API_KEY'`.
    print("No DEFAULT_API found - please set DEFAULT_API in your .env file (e.g. DEFAULT_API=OPENAI)")
    default_api_key = default_model = default_base_uri = None

# Check the key
if not default_api_key:
    print("No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!")
elif default_api_key.strip() != default_api_key:
    print("An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook")
else:
    print("API key found!")

# Check the model (bug fix: the original re-compared the API key here instead
# of the model, so leading/trailing whitespace in the model name went undetected)
if not default_model:
    print("No default_model was found - please head over to the troubleshooting notebook in this folder to identify & fix!")
elif default_model.strip() != default_model:
    print("A default_model was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook")
else:
    print(f"Model: {default_model}")

if not default_base_uri:
    print("No base url found")
else:
    print(f"Base URL: {default_base_uri}")

# One OpenAI-compatible client works for all providers above: Ollama and Gemini
# both expose OpenAI-compatible endpoints, selected purely via base_url.
# (OpenAI is already imported at the top of the file; the duplicate import
# that used to live here was redundant.)
api_client = OpenAI(base_url=default_base_uri, api_key=default_api_key)

# Helper functions

def messages(system_prompt, user_prompt):
    """Build the two-message chat list expected by the chat-completions API.

    Returns a list containing the system message followed by the user message.
    """
    system_message = {"role": "system", "content": system_prompt}
    user_message = {"role": "user", "content": user_prompt}
    return [system_message, user_message]

def get_response(system_prompt, user_prompt):
    """Send one system+user prompt pair to the configured model and return the reply text.

    Uses the module-level `api_client` and `default_model` set up by the
    configuration section above.
    """
    completion = api_client.chat.completions.create(
        model=default_model,
        messages=messages(system_prompt, user_prompt),
    )
    assert completion is not None, "could not resolve response (should never happen)"
    first_choice = completion.choices[0]
    return first_choice.message.content


"""
Define system and user prompts

## Types of prompts

You may know this already - but if not, you will get very familiar with it!

Models like GPT have been trained to receive instructions in a particular way.

They expect to receive:

**A system prompt** that tells them what task they are performing and what tone they should use

**A user prompt** -- the conversation starter that they should reply to
"""

# Define our system prompt - you can experiment with this later, changing the
# last sentence to 'Respond in markdown in Spanish.'

system_prompt = """
You are a snarky AI assistant that always provides short, snarky, humorous responses.
Respond in markdown. Do not wrap the markdown in a code block - respond just with the markdown.
"""

# Define our user prompt - the conversation starter the model replies to

user_prompt = """
Please give me a fun fact about space.
"""

# Ask the model and print the raw markdown reply (the notebook version renders
# this with IPython's Markdown display instead of print)

print(get_response(system_prompt=system_prompt, user_prompt=user_prompt))
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
OPENAI_API_KEY=<your_openai_api_key_here>
OPENAI_BASE_URL=https://api.openai.com/v1/
OPENAI_MODEL=gpt-5-nano

OLLAMA_API_KEY=ollama
OLLAMA_BASE_URL=http://localhost:11434/v1
OLLAMA_MODEL=llama3.2:latest

DEFAULT_API=OLLAMA
Loading