From 1dfb45c6e83e33765d4571549dee75e41d8bab58 Mon Sep 17 00:00:00 2001 From: Stijn Tintel Date: Wed, 17 Jan 2024 11:51:24 +0200 Subject: [PATCH] wac: move OpenAI code to app.internal.openai --- app/internal/command_endpoints/ha_ws.py | 3 +- app/internal/openai.py | 55 +++++++++++++++++++++++++ app/internal/wac.py | 54 ------------------------ app/main.py | 3 +- 4 files changed, 59 insertions(+), 56 deletions(-) create mode 100644 app/internal/openai.py diff --git a/app/internal/command_endpoints/ha_ws.py b/app/internal/command_endpoints/ha_ws.py index d4f412d..00623eb 100644 --- a/app/internal/command_endpoints/ha_ws.py +++ b/app/internal/command_endpoints/ha_ws.py @@ -8,7 +8,8 @@ from jsonget import json_get, json_get_default -from app.internal.wac import FEEDBACK, openai_chat, wac_add, wac_search +from app.internal.openai import openai_chat +from app.internal.wac import FEEDBACK, wac_add, wac_search from . import ( CommandEndpoint, CommandEndpointResponse, diff --git a/app/internal/openai.py b/app/internal/openai.py new file mode 100644 index 0000000..f6e6b15 --- /dev/null +++ b/app/internal/openai.py @@ -0,0 +1,55 @@ +from logging import getLogger + +from app.settings import get_settings + + +FORCE_OPENAI_MODEL = None + +log = getLogger("WAS") +settings = get_settings() + +if settings.openai_api_key is not None: + log.info("Initializing OpenAI Client") + import openai + openai_client = openai.OpenAI( + api_key=settings.openai_api_key, base_url=settings.openai_base_url) + models = openai_client.models.list() + if len(models.data) == 1: + FORCE_OPENAI_MODEL = models.data[0].id + log.info( + f"Only one model on OpenAI endpoint - forcing model '{FORCE_OPENAI_MODEL}'") +else: + openai_client = None + + +def openai_chat(text, model=settings.openai_model): + log.info(f"OpenAI Chat request for text '{text}'") + response = settings.command_not_found + if FORCE_OPENAI_MODEL is not None: + log.info(f"Forcing model '{FORCE_OPENAI_MODEL}'") + model = FORCE_OPENAI_MODEL + 
else: + log.info(f"Using model '{model}'") + if openai_client is not None: + try: + chat_completion = openai_client.chat.completions.create( + messages=[ + { + "role": "system", + "content": settings.openai_system_prompt, + }, + { + "role": "user", + "content": text, + } + ], + model=model, + temperature=settings.openai_temperature, + ) + response = chat_completion.choices[0].message.content + # Make it friendly for TTS and display output + response = response.replace('\n', ' ').replace('\r', '').lstrip() + log.info(f"Got OpenAI response '{response}'") + except Exception as e: + log.info(f"OpenAI failed with '{e}'") + return response diff --git a/app/internal/wac.py b/app/internal/wac.py index 378ab24..a0466a7 100644 --- a/app/internal/wac.py +++ b/app/internal/wac.py @@ -13,7 +13,6 @@ import time from app.internal.was import construct_url, get_config -from app.settings import get_settings WAC_LOG_LEVEL = config('WAC_LOG_LEVEL', default="debug", cast=str).upper() @@ -78,8 +77,6 @@ COLLECTION = config( 'COLLECTION', default='commands', cast=str) -FORCE_OPENAI_MODEL = None - logging.basicConfig( format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, @@ -93,8 +90,6 @@ log.exception(f"Set log level {WAC_LOG_LEVEL} failed with {e}") pass -settings = get_settings() - class WillowAutoCorrectTypesenseStartupException(Exception): """Raised when Typesense failed to start @@ -118,55 +113,6 @@ def init_wac(app): app.wac_enabled = True -# OpenAI -if settings.openai_api_key is not None: - log.info(f"Initializing OpenAI Client") - import openai - openai_client = openai.OpenAI( - api_key=settings.openai_api_key, base_url=settings.openai_base_url) - models = openai_client.models.list() - if len(models.data) == 1: - FORCE_OPENAI_MODEL = models.data[0].id - log.info( - f"Only one model on OpenAI endpoint - forcing model '{FORCE_OPENAI_MODEL}'") -else: - openai_client = None - -# OpenAI Chat - - -def openai_chat(text, model=settings.openai_model): - log.info(f"OpenAI 
Chat request for text '{text}'") - response = settings.command_not_found - if FORCE_OPENAI_MODEL is not None: - log.info(f"Forcing model '{FORCE_OPENAI_MODEL}'") - model = FORCE_OPENAI_MODEL - else: - log.info(f"Using model '{model}'") - if openai_client is not None: - try: - chat_completion = openai_client.chat.completions.create( - messages=[ - { - "role": "system", - "content": settings.openai_system_prompt, - }, - { - "role": "user", - "content": text, - } - ], - model=model, - temperature=settings.openai_temperature, - ) - response = chat_completion.choices[0].message.content - # Make it friendly for TTS and display output - response = response.replace('\n', ' ').replace('\r', '').lstrip() - log.info(f"Got OpenAI response '{response}'") - except Exception as e: - log.info(f"OpenAI failed with '{e}") - return response - # Typesense diff --git a/app/main.py b/app/main.py index 94baa32..0bf75fe 100644 --- a/app/main.py +++ b/app/main.py @@ -29,7 +29,8 @@ CommandEndpointRuntimeException ) from app.internal.command_endpoints.main import init_command_endpoint -from app.internal.wac import FEEDBACK, init_wac, openai_chat, wac_add, wac_search +from app.internal.openai import openai_chat +from app.internal.wac import FEEDBACK, init_wac, wac_add, wac_search from app.internal.was import ( build_msg, get_tz_config,