Skip to content

Commit

Permalink
wac: move OpenAI code to app.internal.openai
Browse files Browse the repository at this point in the history
  • Loading branch information
stintel committed Jan 17, 2024
1 parent 09f6f54 commit 1dfb45c
Show file tree
Hide file tree
Showing 4 changed files with 59 additions and 56 deletions.
3 changes: 2 additions & 1 deletion app/internal/command_endpoints/ha_ws.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,8 @@

from jsonget import json_get, json_get_default

from app.internal.wac import FEEDBACK, openai_chat, wac_add, wac_search
from app.internal.openai import openai_chat
from app.internal.wac import FEEDBACK, wac_add, wac_search
from . import (
CommandEndpoint,
CommandEndpointResponse,
Expand Down
55 changes: 55 additions & 0 deletions app/internal/openai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
from logging import getLogger

from app.settings import get_settings


# Model id forced for all chat requests.  Set at import time when the OpenAI
# endpoint exposes exactly one model; otherwise stays None and the caller's
# (or settings') model is used.
FORCE_OPENAI_MODEL = None

log = getLogger("WAS")
settings = get_settings()

# Initialize the OpenAI client at import time, but only when an API key is
# configured.  `openai` is imported lazily here so the dependency is optional
# for deployments that do not use OpenAI.
# NOTE(review): `openai_client.models.list()` performs a network request at
# import time — import of this module can block or raise if the endpoint is
# unreachable.
if settings.openai_api_key is not None:
    log.info("Initializing OpenAI Client")
    import openai
    openai_client = openai.OpenAI(
        api_key=settings.openai_api_key, base_url=settings.openai_base_url)
    models = openai_client.models.list()
    # A single-model endpoint (e.g. a local OpenAI-compatible server) leaves
    # no real choice — pin that model for every request.
    if len(models.data) == 1:
        FORCE_OPENAI_MODEL = models.data[0].id
        log.info(
            f"Only one model on OpenAI endpoint - forcing model '{FORCE_OPENAI_MODEL}'")
else:
    openai_client = None


def openai_chat(text, model=settings.openai_model):
    """Send *text* to the configured OpenAI chat endpoint and return the reply.

    Returns ``settings.command_not_found`` when no client is configured or
    the request fails, so callers always get a displayable string.

    :param text: user utterance forwarded as the chat "user" message.
    :param model: model id to use; defaults to ``settings.openai_model``
        (captured once at import time) and is overridden by
        ``FORCE_OPENAI_MODEL`` when the endpoint exposes a single model.
    :return: the (whitespace-flattened) assistant reply, or the
        command-not-found fallback string.
    """
    log.info(f"OpenAI Chat request for text '{text}'")
    # Fallback response used when the client is absent or the request fails.
    response = settings.command_not_found
    if FORCE_OPENAI_MODEL is not None:
        log.info(f"Forcing model '{FORCE_OPENAI_MODEL}'")
        model = FORCE_OPENAI_MODEL
    else:
        log.info(f"Using model '{model}'")
    if openai_client is not None:
        try:
            chat_completion = openai_client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": settings.openai_system_prompt,
                    },
                    {
                        "role": "user",
                        "content": text,
                    }
                ],
                model=model,
                temperature=settings.openai_temperature,
            )
            response = chat_completion.choices[0].message.content
            # Make it friendly for TTS and display output
            response = response.replace('\n', ' ').replace('\r', '').lstrip()
            log.info(f"Got OpenAI response '{response}'")
        except Exception as e:
            # Fixed: the log message was missing its closing quote ("'{e}").
            log.info(f"OpenAI failed with '{e}'")
    return response
54 changes: 0 additions & 54 deletions app/internal/wac.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
import time

from app.internal.was import construct_url, get_config
from app.settings import get_settings


WAC_LOG_LEVEL = config('WAC_LOG_LEVEL', default="debug", cast=str).upper()
Expand Down Expand Up @@ -78,8 +77,6 @@
COLLECTION = config(
'COLLECTION', default='commands', cast=str)

FORCE_OPENAI_MODEL = None

logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
Expand All @@ -93,8 +90,6 @@
log.exception(f"Set log level {WAC_LOG_LEVEL} failed with {e}")
pass

settings = get_settings()


class WillowAutoCorrectTypesenseStartupException(Exception):
"""Raised when Typesense failed to start
Expand All @@ -118,55 +113,6 @@ def init_wac(app):
app.wac_enabled = True


# OpenAI
# Conditional client initialization: only when an API key is configured.
# `openai` is imported lazily so the dependency stays optional.
if settings.openai_api_key is not None:
    log.info(f"Initializing OpenAI Client")
    import openai
    openai_client = openai.OpenAI(
        api_key=settings.openai_api_key, base_url=settings.openai_base_url)
    # NOTE(review): this performs a network request at import time.
    models = openai_client.models.list()
    # A single-model endpoint leaves no choice — pin it for every request.
    if len(models.data) == 1:
        FORCE_OPENAI_MODEL = models.data[0].id
        log.info(
            f"Only one model on OpenAI endpoint - forcing model '{FORCE_OPENAI_MODEL}'")
else:
    openai_client = None

# OpenAI Chat


def openai_chat(text, model=settings.openai_model):
    """Send *text* to the configured OpenAI chat endpoint and return the reply.

    Returns ``settings.command_not_found`` when no client is configured or
    the request fails, so callers always get a displayable string.

    :param text: user utterance forwarded as the chat "user" message.
    :param model: model id to use; defaults to ``settings.openai_model``
        (captured once at import time) and is overridden by
        ``FORCE_OPENAI_MODEL`` when the endpoint exposes a single model.
    :return: the (whitespace-flattened) assistant reply, or the
        command-not-found fallback string.
    """
    log.info(f"OpenAI Chat request for text '{text}'")
    # Fallback response used when the client is absent or the request fails.
    response = settings.command_not_found
    if FORCE_OPENAI_MODEL is not None:
        log.info(f"Forcing model '{FORCE_OPENAI_MODEL}'")
        model = FORCE_OPENAI_MODEL
    else:
        log.info(f"Using model '{model}'")
    if openai_client is not None:
        try:
            chat_completion = openai_client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": settings.openai_system_prompt,
                    },
                    {
                        "role": "user",
                        "content": text,
                    }
                ],
                model=model,
                temperature=settings.openai_temperature,
            )
            response = chat_completion.choices[0].message.content
            # Make it friendly for TTS and display output
            response = response.replace('\n', ' ').replace('\r', '').lstrip()
            log.info(f"Got OpenAI response '{response}'")
        except Exception as e:
            # Fixed: the log message was missing its closing quote ("'{e}").
            log.info(f"OpenAI failed with '{e}'")
    return response

# Typesense


Expand Down
3 changes: 2 additions & 1 deletion app/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@
CommandEndpointRuntimeException
)
from app.internal.command_endpoints.main import init_command_endpoint
from app.internal.wac import FEEDBACK, init_wac, openai_chat, wac_add, wac_search
from app.internal.openai import openai_chat
from app.internal.wac import FEEDBACK, init_wac, wac_add, wac_search
from app.internal.was import (
build_msg,
get_tz_config,
Expand Down

0 comments on commit 1dfb45c

Please sign in to comment.