feat: add Anthropic support #288

Status: Merged (26 commits, Apr 21, 2024)

Commits
a1b9b28
add Anthropic support #283
ocss884 Sep 14, 2023
b242cc0
correct lint and format
ocss884 Sep 14, 2023
201a4e2
refresh poetry.lock
ocss884 Sep 15, 2023
de6d6ee
Use union instead of "|" for py3.8
ocss884 Sep 15, 2023
c3265f8
Create pull.yml
ocss884 Oct 6, 2023
e735d9a
Merge branch 'camel-ai:master' into master
ocss884 Nov 10, 2023
c0e55ea
Merge branch 'camel-ai:master' into master
ocss884 Nov 18, 2023
35b3673
Merge branch 'camel-ai:master' into master
ocss884 Dec 5, 2023
808b2cf
update to "master"
ocss884 Dec 5, 2023
ff6f94c
apply #288, migrate to openai v1.0.0
ocss884 Dec 5, 2023
e597c05
update lock file and correct code format
ocss884 Dec 27, 2023
4fde44a
Merge branch 'master' into anthropic_support
ocss884 Dec 27, 2023
1f85339
bug fix in commons.py
ocss884 Jan 2, 2024
72bfdef
update test file and remove mistakenly tracked pull.yml
ocss884 Jan 3, 2024
faec388
Merge remote-tracking branch 'upstream/master'
ocss884 Mar 17, 2024
fcf4511
Merge branch 'master' into anthropic_support
ocss884 Mar 17, 2024
fe368e1
update lock file
ocss884 Mar 17, 2024
9dd1822
limit openai version<1.14.0
ocss884 Mar 17, 2024
a48a57e
add docstring and improve api_key checking logic
ocss884 Mar 24, 2024
2f5d17f
Merge branch 'master' of https://github.com/camel-ai/camel
ocss884 Apr 15, 2024
156402e
Merge branch 'master' into anthropic_support
ocss884 Apr 15, 2024
f202c7c
add Claude3 support & migrate to Message endpoint
ocss884 Apr 17, 2024
85bbb18
small fix
ocss884 Apr 17, 2024
1138868
Merge branch 'master' into anthropic_support
ocss884 Apr 17, 2024
425a531
Merge branch 'master' into anthropic_support
Wendong-Fan Apr 17, 2024
cdc6744
Merge branch 'master' into anthropic_support
Wendong-Fan Apr 21, 2024
6 changes: 3 additions & 3 deletions camel/configs.py
@@ -206,11 +206,11 @@ class AnthropicConfig(BaseConfig):
            (default: :obj:`False`)
    """
-    max_tokens_to_sample: int = 256
+    max_tokens: int = 256
     stop_sequences: Union[List[str], NotGiven] = NOT_GIVEN
     temperature: float = 1
-    top_p: float = 0.7
-    top_k: int = 5
+    top_p: Union[float, NotGiven] = NOT_GIVEN
+    top_k: Union[int, NotGiven] = NOT_GIVEN
     metadata: NotGiven = NOT_GIVEN
     stream: bool = False
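
Note on the config change above: in the Anthropic SDK, NOT_GIVEN means "omit this field from the request", so the server applies its own sampling defaults rather than the previously hard-coded top_p=0.7 and top_k=5; max_tokens_to_sample is likewise renamed to max_tokens to match the Messages endpoint. A minimal sketch of the omission semantics (the dataclass is a simplified stand-in for AnthropicConfig, not the real CAMEL class):

from dataclasses import dataclass
from typing import List, Union

from anthropic._types import NOT_GIVEN, NotGiven


@dataclass
class AnthropicConfigSketch:
    # Fields left as NOT_GIVEN are dropped before the request is sent,
    # letting the API fall back to its server-side defaults.
    max_tokens: int = 256
    stop_sequences: Union[List[str], NotGiven] = NOT_GIVEN
    temperature: float = 1.0
    top_p: Union[float, NotGiven] = NOT_GIVEN
    top_k: Union[int, NotGiven] = NOT_GIVEN
    stream: bool = False


config = AnthropicConfigSketch()
# The SDK filters NOT_GIVEN values itself; the same filtering can be done
# explicitly when building a plain kwargs dict:
kwargs = {k: v for k, v in vars(config).items() if v is not NOT_GIVEN}
print(kwargs)  # {'max_tokens': 256, 'temperature': 1.0, 'stream': False}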
50 changes: 14 additions & 36 deletions camel/models/anthropic_model.py
@@ -12,43 +12,34 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, Optional

-from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic
-from anthropic.types import Completion
-from openai import Stream
+from anthropic import Anthropic
+from anthropic._types import NOT_GIVEN

 from camel.configs import ANTHROPIC_API_PARAMS
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
-from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
+from camel.types import ChatCompletion, ModelType
 from camel.utils import AnthropicTokenCounter, BaseTokenCounter
-from camel.utils.token_counting import messages_to_prompt


 class AnthropicModel(BaseModelBackend):
     r"""Anthropic API in a unified BaseModelBackend interface."""

     def __init__(self, model_type: ModelType,
                  model_config_dict: Dict[str, Any]) -> None:

         super().__init__(model_type, model_config_dict)

         self.client = Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
         self._token_counter: Optional[BaseTokenCounter] = None

-    def _convert_openai_messages_to_anthropic_prompt(
-            self, messages: List[OpenAIMessage]):
-        return messages_to_prompt(messages, self.model_type)
-
-    def _convert_response_from_anthropic_to_openai(self, response: Completion):
+    def _convert_response_from_anthropic_to_openai(self, response):
         # openai ^1.0.0 format, reference openai/types/chat/chat_completion.py
         obj = ChatCompletion.construct(
             id=None, choices=[
                 dict(
                     index=0, message={
                         "role": "assistant",
-                        "content": response.completion
+                        "content": response.content[0].text
                     }, finish_reason=response.stop_reason)
             ], created=None, model=response.model, object="chat.completion")
         return obj
@@ -65,21 +56,6 @@ def token_counter(self) -> BaseTokenCounter:
             self._token_counter = AnthropicTokenCounter(self.model_type)
         return self._token_counter

-    def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
-        r"""Count the number of tokens from a user-assisstant alternating
-        message list (the OpenAI format).
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            int: The number of tokens in the message list after transformed
-                into Anthropic prompting format.
-        """
-        prompt = self._convert_openai_messages_to_anthropic_prompt(messages)
-        return self.count_tokens_from_prompt(prompt)
-
     def count_tokens_from_prompt(self, prompt: str) -> int:
         r"""Count the number of tokens from a prompt.
@@ -93,8 +69,8 @@ def count_tokens_from_prompt(self, prompt: str) -> int:

     def run(
         self,
-        messages: List[OpenAIMessage],
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        messages,
+    ):
         r"""Run inference of Anthropic chat completion.

         Args:
@@ -105,10 +81,12 @@ def run(
             Dict[str, Any]: Response in the OpenAI API format.
         """

-        prompt = self._convert_openai_messages_to_anthropic_prompt(messages)
-        response = self.client.completions.create(
-            model=self.model_type.value,
-            prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}",
+        if messages[0]["role"] == "system":
+            sys_msg = messages.pop(0)["content"]  # type: ignore
+        else:
+            sys_msg = NOT_GIVEN
+        response = self.client.messages.create(  # type: ignore
+            model=self.model_type.value, system=sys_msg, messages=messages,
             **self.model_config_dict)

         # format response to openai format
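
The heart of the migration is visible in run() above: the legacy Text Completions call (client.completions.create with a single HUMAN_PROMPT/AI_PROMPT-decorated string) becomes a Messages call that takes structured chat turns plus a top-level system parameter. A standalone sketch of the same call pattern, assuming a valid ANTHROPIC_API_KEY; the model name and prompts are illustrative:

import os

from anthropic import Anthropic
from anthropic._types import NOT_GIVEN

client = Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])

messages = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Summarize the CAMEL framework in one line."},
]

# Mirror the PR's logic: the Messages endpoint takes the system prompt as a
# top-level parameter, so pop it off the OpenAI-style list first.
if messages[0]["role"] == "system":
    sys_msg = messages.pop(0)["content"]
else:
    sys_msg = NOT_GIVEN

response = client.messages.create(
    model="claude-3-haiku-20240307",
    system=sys_msg,
    messages=messages,  # remaining user/assistant turns
    max_tokens=256,
)

# Messages responses carry a list of content blocks rather than a single
# completion string, which is why the PR reads response.content[0].text.
print(response.content[0].text)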
36 changes: 28 additions & 8 deletions camel/types/enums.py
@@ -37,8 +37,16 @@ class ModelType(Enum):
     VICUNA = "vicuna"
     VICUNA_16K = "vicuna-16k"

-    CLAUDE_2 = "claude-2"
-    CLAUDE_INSTANT = "claude-instant-1"
+    # Legacy Anthropic models
+    # NOTE: of the legacy Anthropic models, only Claude 2.1 supports system prompts
+    CLAUDE_2_1 = "claude-2.1"
+    CLAUDE_2_0 = "claude-2.0"
+    CLAUDE_INSTANT_1_2 = "claude-instant-1.2"
+
+    # Claude 3 models
+    CLAUDE_3_OPUS = "claude-3-opus-20240229"
+    CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
+    CLAUDE_3_HAIKU = "claude-3-haiku-20240307"

     @property
     def value_for_tiktoken(self) -> str:
@@ -72,10 +80,14 @@ def is_anthropic(self) -> bool:
         Returns:
             bool: Whether this type of models is anthropic.
         """
-        if self.name in {"CLAUDE_2", "CLAUDE_INSTANT"}:
-            return True
-        else:
-            return False
+        return self in {
+            ModelType.CLAUDE_INSTANT_1_2,
+            ModelType.CLAUDE_2_0,
+            ModelType.CLAUDE_2_1,
+            ModelType.CLAUDE_3_OPUS,
+            ModelType.CLAUDE_3_SONNET,
+            ModelType.CLAUDE_3_HAIKU,
+        }

     @property
     def token_limit(self) -> int:
@@ -104,8 +116,16 @@ def token_limit(self) -> int:
             return 2048
         elif self is ModelType.VICUNA_16K:
             return 16384
-        elif self in {ModelType.CLAUDE_2, ModelType.CLAUDE_INSTANT}:
-            return 100_000
+        elif self.is_anthropic:
+            if self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}:
+                return 100_000
+            elif self in {
+                ModelType.CLAUDE_2_1,
+                ModelType.CLAUDE_3_OPUS,
+                ModelType.CLAUDE_3_SONNET,
+                ModelType.CLAUDE_3_HAIKU,
+            }:
+                return 200_000
         else:
             raise ValueError("Unknown model type")
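
A quick sanity check of the new enum branches (a sketch; it assumes camel.types re-exports ModelType as in this PR):

from camel.types import ModelType

for model in (ModelType.CLAUDE_2_0, ModelType.CLAUDE_2_1,
              ModelType.CLAUDE_3_OPUS):
    # is_anthropic gates the Anthropic token_limit branches added above.
    print(model.value, model.is_anthropic, model.token_limit)

# Expected, given the branches above:
# claude-2.0 True 100000
# claude-2.1 True 200000
# claude-3-opus-20240229 True 200000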
23 changes: 0 additions & 23 deletions camel/utils/token_counting.py
@@ -73,29 +73,6 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
         else:
             ret += role + ":"
         return ret
-    elif model.is_anthropic:
-
-        # use XML tag to decorate prompt
-        # https://docs.anthropic.com/claude/docs/constructing-a-prompt#mark-different-parts-of-the-prompt
-        # https://docs.anthropic.com/claude/docs/roleplay-dialogue
-        # https://docs.anthropic.com/claude/docs/human-and-assistant-formatting
-        system_message = str(system_message)
-        ret = f"\n{system_message}\n"
-        for msg in messages[1:]:
-            role, content = msg["role"], msg["content"]
-            # Claude does not perform well if the system message RULE OF USER/ASSISTANT is sent twice. (see role_playing/RolePlaying.init_chat())
-            # Here is a special treatment for Claude to remove the redundant system message.
-            if not isinstance(content, str):
-                raise ValueError("Currently multimodal context is not "
-                                 "supported by the token counter.")
-            if content.startswith(system_message):
-                ret = content
-                continue
-            if role == "user":
-                ret += f"{HUMAN_PROMPT} {content}"
-            elif role == "assisstant":
-                ret += f"{AI_PROMPT} {content}"
-        return ret
     else:
         raise ValueError(f"Invalid model type: {model}")

Review comment (Member), on the deleted branch above: why was this part deleted? @ocss884
cc @lightaime
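
As context for the review question: the deleted elif model.is_anthropic branch built the single prompt string wanted by the legacy Text Completions API, and its only caller in AnthropicModel was removed when run() moved to the Messages endpoint, so the branch presumably became dead code. Incidentally, the removed code compared against the misspelled role "assisstant", so assistant turns were silently skipped. A minimal reconstruction of what the branch produced (HUMAN_PROMPT and AI_PROMPT are real anthropic SDK exports; the turns are illustrative):

from anthropic import AI_PROMPT, HUMAN_PROMPT  # "\n\nAssistant:" and "\n\nHuman:"

system_message = "You are a helpful assistant."
turns = [("user", "Hello"), ("assistant", "Hi, how can I help?")]

# Rebuild the legacy single-string prompt format the deleted branch emitted.
prompt = f"\n{system_message}\n"
for role, content in turns:
    marker = HUMAN_PROMPT if role == "user" else AI_PROMPT
    prompt += f"{marker} {content}"

print(prompt)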
9 changes: 4 additions & 5 deletions examples/anthropic_models/role_playing_with_claude.py
@@ -53,11 +53,10 @@ def main(model_type=None) -> None:
     print(Fore.RED + f"Final task prompt:\n{role_play_session.task_prompt}\n")

     chat_turn_limit, n = 50, 0
-    input_assistant_msg, _ = role_play_session.init_chat()
+    input_msg = role_play_session.init_chat()
     while n < chat_turn_limit:
         n += 1
-        assistant_response, user_response = role_play_session.step(
-            input_assistant_msg)
+        assistant_response, user_response = role_play_session.step(input_msg)

         if assistant_response.terminated:
             print(Fore.GREEN +
@@ -78,9 +77,9 @@
         if "CAMEL_TASK_DONE" in user_response.msg.content:
             break

-        input_assistant_msg = assistant_response.msg
+        input_msg = assistant_response.msg


 if __name__ == "__main__":
-    main(model_type=ModelType.CLAUDE_2)
+    main(model_type=ModelType.CLAUDE_2_0)
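
With the enum additions in this PR, the example accepts any Anthropic ModelType. A sketch of swapping in a Claude 3 model, assuming the repository root is on sys.path so the example module is importable (otherwise edit the script's main() call directly):

from camel.types import ModelType

from examples.anthropic_models.role_playing_with_claude import main

# Any ModelType with is_anthropic == True should work here, e.g. the
# Claude 3 models added by this PR (requires ANTHROPIC_API_KEY to be set).
main(model_type=ModelType.CLAUDE_3_SONNET)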