Skip to content

Commit

Permalink
Merge pull request #498 from TransformerOptimus/gpt4-32k
Browse files Browse the repository at this point in the history
Gpt4 32k
  • Loading branch information
TransformerOptimus committed Jun 26, 2023
2 parents a4d5c85 + 66e97b7 commit 20863a5
Show file tree
Hide file tree
Showing 4 changed files with 38 additions and 12 deletions.
16 changes: 7 additions & 9 deletions config_template.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,17 +9,11 @@ OPENAI_API_KEY: YOUR_OPEN_API_KEY
OPENAI_API_BASE: https://api.openai.com/v1
#OPENAI_API_BASE: "http://super__tgwui:5001/v1"

# "gpt-3.5-turbo-0301": 4032, "gpt-4-0314": 8092, "gpt-3.5-turbo": 4032, "gpt-4": 8092, "llama":2048, "mpt-7b-storywriter":45000
# "gpt-3.5-turbo-0301": 4032, "gpt-4-0314": 8092, "gpt-3.5-turbo": 4032, "gpt-4": 8092, "gpt-4-32k": 32768, "gpt-4-32k-0314": 32768, "llama":2048, "mpt-7b-storywriter":45000
MODEL_NAME: "gpt-3.5-turbo-0301"
MAX_TOOL_TOKEN_LIMIT: 800
MAX_MODEL_TOKEN_LIMIT: 4032 # set to 2048 for llama

# For running stable diffusion
STABILITY_API_KEY: YOUR_STABILITY_API_KEY
#Engine IDs that can be used: 'stable-diffusion-v1', 'stable-diffusion-v1-5','stable-diffusion-512-v2-0', 'stable-diffusion-768-v2-0','stable-diffusion-512-v2-1','stable-diffusion-768-v2-1','stable-diffusion-xl-beta-v2-2-2'
ENGINE_ID: "stable-diffusion-xl-beta-v2-2-2"


#DATABASE INFO
# redis details
DB_NAME: super_agi_main
Expand Down Expand Up @@ -56,7 +50,6 @@ FRONTEND_URL: "http://localhost:3000"
#ENCRYPTION KEY
ENCRYPTION_KEY: secret


#WEAVIATE

# If you are using docker or web hosted uncomment the next two lines and comment the third one
Expand Down Expand Up @@ -93,4 +86,9 @@ JIRA_USERNAME: YOUR_JIRA_EMAIL
JIRA_API_TOKEN: YOUR_JIRA_API_TOKEN

#SLACK
SLACK_BOT_TOKEN: YOUR_SLACK_BOT_TOKEN
SLACK_BOT_TOKEN: YOUR_SLACK_BOT_TOKEN

# For running stable diffusion
STABILITY_API_KEY: YOUR_STABILITY_API_KEY
#Engine IDs that can be used: 'stable-diffusion-v1', 'stable-diffusion-v1-5','stable-diffusion-512-v2-0', 'stable-diffusion-768-v2-0','stable-diffusion-512-v2-1','stable-diffusion-768-v2-1','stable-diffusion-xl-beta-v2-2-2'
ENGINE_ID: "stable-diffusion-xl-beta-v2-2-2"
2 changes: 1 addition & 1 deletion gui/pages/Content/Agents/AgentCreate.js
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ export default function AgentCreate({sendAgentData, selectedProjectId, fetchAgen
const [goals, setGoals] = useState(['Describe the agent goals here']);
const [instructions, setInstructions] = useState(['']);

const models = ['gpt-4', 'gpt-3.5-turbo','gpt-3.5-turbo-16k']
const models = ['gpt-4', 'gpt-3.5-turbo','gpt-3.5-turbo-16k', 'gpt-4-32k']
const [model, setModel] = useState(models[1]);
const modelRef = useRef(null);
const [modelDropdown, setModelDropdown] = useState(false);
Expand Down
4 changes: 2 additions & 2 deletions superagi/helper/token_counter.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ def token_limit(model: str = "gpt-3.5-turbo-0301") -> int:
int: The token limit.
"""
try:
model_token_limit_dict = {"gpt-3.5-turbo-0301": 4032, "gpt-4-0314": 8092, "gpt-3.5-turbo": 4032, "gpt-4": 8092,"gpt-3.5-turbo-16k": 16184}
model_token_limit_dict = {"gpt-3.5-turbo-0301": 4032, "gpt-4-0314": 8092, "gpt-3.5-turbo": 4032, "gpt-4": 8092,"gpt-3.5-turbo-16k": 16184, "gpt-4-32k": 32768, "gpt-4-32k-0314": 32768}
return model_token_limit_dict[model]
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
Expand All @@ -43,7 +43,7 @@ def count_message_tokens(messages: List[BaseMessage], model: str = "gpt-3.5-turb
int: The number of tokens in the messages.
"""
try:
model_token_per_message_dict = {"gpt-3.5-turbo-0301": 4, "gpt-4-0314": 3, "gpt-3.5-turbo": 4, "gpt-4": 3,"gpt-3.5-turbo-16k":4}
model_token_per_message_dict = {"gpt-3.5-turbo-0301": 4, "gpt-4-0314": 3, "gpt-3.5-turbo": 4, "gpt-4": 3,"gpt-3.5-turbo-16k":4, "gpt-4-32k": 3, "gpt-4-32k-0314": 3}
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
Expand Down
28 changes: 28 additions & 0 deletions tests/unit_tests/helper/test_token_counter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import pytest

from typing import List
from superagi.helper.token_counter import TokenCounter
from superagi.types.common import BaseMessage
from unittest.mock import MagicMock


def test_token_limit():
    """Verify TokenCounter.token_limit returns the configured limit per model."""
    # Known models map to their fixed context-window sizes.
    expected_limits = {
        "gpt-3.5-turbo-0301": 4032,
        "gpt-4-0314": 8092,
        "gpt-3.5-turbo": 4032,
        "gpt-4": 8092,
        "gpt-3.5-turbo-16k": 16184,
        "gpt-4-32k": 32768,
        "gpt-4-32k-0314": 32768,
    }
    for model_name, limit in expected_limits.items():
        assert TokenCounter.token_limit(model_name) == limit
    # Unknown models fall back to the default limit.
    assert TokenCounter.token_limit("non_existing_model") == 8092


def test_count_text_tokens():
    """Verify TokenCounter.count_text_tokens on short sample strings.

    NOTE: the expected counts are tied to the current hardcoded values in
    TokenCounter.count_text_tokens; update them if that logic changes.
    """
    samples = [
        ("You are a helpful assistant.", 10),
        ("What is your name?", 9),
    ]
    for text, expected_tokens in samples:
        assert TokenCounter.count_text_tokens(text) == expected_tokens

0 comments on commit 20863a5

Please sign in to comment.