Add support for Azure, OpenAI, PaLM, Anthropic, Cohere Models - using litellm #80

Open · wants to merge 1 commit into base: stable
1 change: 1 addition & 0 deletions requirements.txt
@@ -1,6 +1,7 @@
ffmpeg
python-dotenv
gradio==3.38.0
litellm==0.1.226
openai
tiktoken
tinydb
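
For reviewers who want to sanity-check the pinned dependency, here is a minimal smoke test against litellm's completion API (not part of this PR; it assumes OPENAI_API_KEY is set in the environment, and the prompt and model name are illustrative):

    # Hypothetical smoke test for litellm==0.1.226, not part of this PR.
    # Assumes OPENAI_API_KEY is exported before running.
    import litellm

    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Reply with the word 'ok'."}],
        max_tokens=5,
    )
    print(response['choices'][0]['message']['content'])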
41 changes: 41 additions & 0 deletions shortGPT/gpt/gpt_utils.py
@@ -4,6 +4,7 @@
from time import sleep, time

import openai
import litellm
import tiktoken
import yaml

@@ -99,3 +100,43 @@ def gpt3Turbo_completion(chat_prompt="", system="You are an AI that can give the
                raise Exception("GPT3 error: %s" % oops)
            print('Error communicating with OpenAI:', oops)
            sleep(1)


def liteLLM_completion(chat_prompt="", system="You are an AI that can give the answer to anything", temp=0.7, model="gpt-3.5-turbo", max_tokens=1000, remove_nl=True, conversation=None):
    # Works with Azure, OpenAI, PaLM, Anthropic, and Cohere models, e.g.:
    #   liteLLM_completion(chat_prompt, model="claude-v2")
    #   liteLLM_completion(chat_prompt, model="claude-instant-1")
    #   liteLLM_completion(chat_prompt, model="command-nightly")
    # litellm reads API keys from the environment / .env and handles auth,
    # e.g. os.environ['OPENAI_API_KEY']. Supported models are listed at
    # https://litellm.readthedocs.io/en/latest/supported/
    max_retry = 5
    retry = 0
    while True:
        try:
            if conversation:
                messages = conversation
            else:
                messages = [
                    {"role": "system", "content": system},
                    {"role": "user", "content": chat_prompt}
                ]
            response = litellm.completion(
                model=model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=temp)
            text = response['choices'][0]['message']['content'].strip()
            if remove_nl:
                text = re.sub(r'\s+', ' ', text)
            filename = '%s_litellm.txt' % time()
            if not os.path.exists('.logs/gpt_logs'):
                os.makedirs('.logs/gpt_logs')
            with open('.logs/gpt_logs/%s' % filename, 'w', encoding='utf-8') as outfile:
                outfile.write(f"System prompt: ===\n{system}\n===\n" + f"Chat prompt: ===\n{chat_prompt}\n===\n" + f'RESPONSE:\n====\n{text}\n===\n')
            return text
        except Exception as oops:
            retry += 1
            if retry >= max_retry:
                raise Exception("liteLLM error: %s" % oops)
            print('Error communicating with the LLM provider:', oops)
            sleep(1)
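
For illustration, a sketch of how the new helper could be exercised across providers once keys are in place (not part of this PR; the environment-variable names follow litellm's documented convention and the prompts are placeholders):

    # Hypothetical usage of liteLLM_completion across providers.
    # litellm reads these environment variables itself; set them before calling.
    import os

    os.environ["OPENAI_API_KEY"] = "sk-..."         # OpenAI (default model below)
    os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..."  # Anthropic models
    os.environ["COHERE_API_KEY"] = "..."            # Cohere models

    # Same call site for every provider; only the model name changes.
    print(liteLLM_completion("Summarize litellm in one sentence."))  # gpt-3.5-turbo
    print(liteLLM_completion("Summarize litellm in one sentence.", model="claude-instant-1"))
    print(liteLLM_completion("Summarize litellm in one sentence.", model="command-nightly"))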