Warning for missing pgvector import. Define response_type
uogbuji committed Jun 25, 2024
1 parent 3c9608f commit cfafc15
Showing 3 changed files with 13 additions and 1 deletion.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -13,6 +13,7 @@ Notable changes to Format based on [Keep a Changelog](https://keepachangelog.co

 - `joiner` param to `text_helper.text_split()` for better control of regex separator handling
 - query filter mix-in, `embedding.pgvector.match_oneof()`, for use with `meta_filter` argument to `DB.search`
+- `llm_wrapper.response_type` to differentiate tool calling vs regular LLM responses

 ### Changed

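For context on the `match_oneof()` entry above: a hedged sketch of how the mix-in might plug into `DB.search`'s `meta_filter` argument. This commit only names the API, so the field name, values, and `search()` parameters below are assumptions, not the library's documented signature.

```python
# Hypothetical usage sketch; match_oneof's parameters and DB.search's full
# signature aren't shown in this commit, so the call shapes are assumptions.
from ogbujipt.embedding.pgvector import match_oneof

async def filtered_search(db):
    # db: an already-initialized pgvector DB object
    return await db.search(
        text='Solar system formation',
        # 'category' is an illustrative metadata field name
        meta_filter=match_oneof('category', ['astronomy', 'physics']),
        limit=5,
    )
```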
4 changes: 3 additions & 1 deletion pylib/embedding/pgvector.py
@@ -18,7 +18,9 @@
     from pgvector.asyncpg import register_vector
     PREREQS_AVAILABLE = True
     POOL_TYPE = asyncpg.pool.Pool
-except ImportError:
+except ImportError as e:
+    import warnings
+    warnings.warn(f'Missing module {e.name}; required for using PGVector')
     PREREQS_AVAILABLE = False
     asyncpg = None
     register_vector = object()  # Set up a dummy to satisfy the type hints
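This change surfaces a warning when the optional dependencies are missing, rather than silently setting `PREREQS_AVAILABLE = False`. A minimal sketch of how that might look to a caller, assuming the package imports as `ogbujipt`:

```python
# Sketch: observe the new warning when pgvector/asyncpg aren't installed.
# If the prereqs *are* installed, no warning fires and PREREQS_AVAILABLE is True.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    from ogbujipt.embedding import pgvector

if not pgvector.PREREQS_AVAILABLE:
    for w in caught:
        print(w.message)  # e.g. "Missing module pgvector; required for using PGVector"
```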
9 changes: 9 additions & 0 deletions pylib/llm_wrapper.py
@@ -17,6 +17,7 @@
 import concurrent.futures
 from functools import partial
 from typing import List
+from enum import Enum

 from amara3 import iri

@@ -41,6 +42,11 @@
 DUMMY_MODEL = 'DUMMY_MODEL'


+class response_type(Enum):
+    MESSAGE = 1
+    TOOL_CALL = 2
+
+
 class llm_response(config.attr_dict):
     '''
     Uniform interface for LLM responses from Open
@@ -52,6 +58,7 @@ def from_openai_chat(response):
         '''
         # print(f'from_openai_chat: {response =}')
         resp = llm_response(response)
+        resp['response_type'] = response_type.MESSAGE  # Default assumption
         if 'usage' in resp:
             resp['usage'] = llm_response(resp['usage'])
             resp['prompt_tokens'] = resp.usage.prompt_tokens
@@ -72,11 +79,13 @@ def from_openai_chat(response):
             rc1 = resp['choices'][0]
             # No response message content if a tool call is invoked
             if rc1.get('message', {}).get('tool_calls'):
+                resp['response_type'] = response_type.TOOL_CALL
                 # WTH does OpenAI have these arguments properties as plain text? Seems a massive layering violation
                 for tc in rc1['message']['tool_calls']:
                     tc['function']['arguments_obj'] = json.loads(tc['function']['arguments'])
             else:
                 resp['first_choice_text'] = rc1['text'] if 'text' in rc1 else rc1['message']['content']
+                # print(f'from_openai_chat: {rc1 =}')
         else:
             resp['first_choice_text'] = resp['content']
         return resp
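With `response_type` tagged on every parsed response, downstream code can branch on it instead of re-probing for `tool_calls`. A sketch of such a caller, assuming the module imports as `ogbujipt.llm_wrapper`:

```python
from ogbujipt.llm_wrapper import llm_response, response_type

def handle(raw_response):
    # raw_response: an OpenAI-style chat completion payload
    resp = llm_response.from_openai_chat(raw_response)
    if resp['response_type'] == response_type.TOOL_CALL:
        # arguments_obj holds the tool arguments, already parsed from JSON
        for tc in resp['choices'][0]['message']['tool_calls']:
            print('tool requested:', tc['function']['name'], tc['function']['arguments_obj'])
    else:
        print('message:', resp['first_choice_text'])
```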
