Skip to content

Commit

Permalink
Merge pull request #32 from ijwfly/feature/gpt-4o
Browse files Browse the repository at this point in the history
GPT-4o Support
  • Loading branch information
ijwfly committed May 13, 2024
2 parents c4dfac9 + b591d38 commit 51ccc16
Show file tree
Hide file tree
Showing 5 changed files with 28 additions and 3 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

This GitHub repository contains the implementation of a telegram bot, designed to facilitate seamless interaction with GPT-3.5, GPT-4 and GPT-4o, state-of-the-art language models by OpenAI.

🔥 **GPT-4 Turbo support (with vision)**
🔥 **GPT-4o support (with vision)**
🔥 **Custom OpenAI API compatible endpoints support (see `app/llm_models.py` for example of using WizardLM-2 8x22b via OpenRouter.ai)**
🔥 **DALL-E 3 Image generation support**

Expand Down
3 changes: 2 additions & 1 deletion app/bot/settings_menu.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,9 @@
from app.storage.user_role import check_access_conditions, UserRole

# Model options offered in the bot's settings menu, mapped to their
# human-readable labels. Insertion order is presumably the display order
# in the menu (newest model first) — TODO confirm against the menu builder.
GPT_MODELS_OPTIONS = {
'gpt-3.5-turbo': 'GPT-3.5',
# NOTE(review): 'gpt-3.5-turbo' appears twice — this span is rendered diff
# residue (the line above is the removed old entry, the one below is its
# re-added position). In Python the later duplicate key wins.
'gpt-4o': 'GPT-4o',
'gpt-4-turbo': 'GPT-4-Turbo',
'gpt-3.5-turbo': 'GPT-3.5',
}

ALL_MODELS_OPTIONS = list(get_models().keys())
Expand Down
22 changes: 22 additions & 0 deletions app/llm_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ class LLModel:
# Model identifier constants (the exact string ids sent to the API).
# Attributes of the LLModel class per the enclosing diff hunk header.
GPT_35_TURBO_16K = 'gpt-3.5-turbo-16k'
GPT_4 = 'gpt-4'
GPT_4_TURBO = 'gpt-4-turbo'
# Added by this commit: OpenAI GPT-4o.
GPT_4O = 'gpt-4o'
GPT_4_TURBO_PREVIEW = 'gpt-4-turbo-preview'
GPT_4_VISION_PREVIEW = 'gpt-4-vision-preview'
# OpenRouter-hosted model id (provider-prefixed path, not an OpenAI model).
OPENROUTER_WIZARDLM2 = 'microsoft/wizardlm-2-8x22b'
Expand Down Expand Up @@ -109,6 +110,27 @@ def get_models():
),
base_url=settings.OPENAI_BASE_URL,
),
# Registry entry for GPT-4o, added by this commit. One entry in the
# model map built by get_models(); mirrors the structure of the
# GPT-4-Turbo entry directly above it in the file.
LLModel.GPT_4O: LLModel(
model_name=LLModel.GPT_4O,
model_readable_name='GPT-4o',
api_key=settings.OPENAI_TOKEN,
# Minimum role a user needs before this model is selectable.
minimum_user_role=settings.USER_ROLE_CHOOSE_MODEL,
context_configuration=LLMContextConfiguration(
# Conversation-memory token budgets. Presumably tuned for cost rather
# than the model's full context window (gpt-4o supports far more than
# 13k tokens) — TODO confirm intent.
short_term_memory_tokens=8 * 1024,
summary_length=2048,
hard_max_context_size=13 * 1024,
),
model_price=LLMPrice(
# Values match OpenAI's May-2024 gpt-4o pricing of $5 / $15 per 1M
# tokens if the unit is USD per 1K tokens — verify the unit LLMPrice
# expects against the other entries.
input_tokens_price=Decimal('0.005'),
output_tokens_price=Decimal('0.015'),
),
capabilities=LLMCapabilities(
function_calling=True,
image_processing=True,
streaming_responses=True,
),
base_url=settings.OPENAI_BASE_URL,
),

# Deprecated OpenAI models
LLModel.GPT_35_TURBO_16K: LLModel(
Expand Down
2 changes: 2 additions & 0 deletions app/openai_helpers/count_tokens.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@ def count_string_tokens(string: str, model="gpt-3.5-turbo") -> int:
model = "gpt-3.5-turbo"
elif "gpt-4" in model:
model = "gpt-4"
elif "gpt-4o" in model:
model = "gpt-4o"
else:
# TODO: add method to calculate tokens for different models
model = "gpt-4"
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ pytz==2023.3
regex==2023.6.3
requests==2.31.0
sniffio==1.3.0
tiktoken==0.4.0
tiktoken==0.7.0
tqdm==4.65.0
typing_extensions==4.8.0
urllib3==2.0.3
Expand Down

0 comments on commit 51ccc16

Please sign in to comment.