Skip to content

Commit

Permalink
Merge branch 'master' into feature/insight-agent
Browse files Browse the repository at this point in the history
  • Loading branch information
Appointat committed Apr 22, 2024
2 parents e0dd839 + 25c8e42 commit 3ad4b5a
Show file tree
Hide file tree
Showing 24 changed files with 1,315 additions and 723 deletions.
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
🐫 **Welcome to CAMEL!** 🐫

Thank you for your interest in contributing to the CAMEL project! 🎉 We're excited to have your support. As an open-source initiative in a rapidly evolving and open-ended field, we wholeheartedly welcome contributions of all kinds. Whether you want to introduce new features, enhance the infrastructure, improve documentation, asking issues, add more examples, implement state-of-the-art research ideas, or fix bugs, we appreciate your enthusiasm and efforts. 🙌 You are welcome to join our [slack](https://join.slack.com/t/camel-kwr1314/shared_invite/zt-1vy8u9lbo-ZQmhIAyWSEfSwLCl2r2eKA) for more efficient communication. 💬
Thank you for your interest in contributing to the CAMEL project! 🎉 We're excited to have your support. As an open-source initiative in a rapidly evolving and open-ended field, we wholeheartedly welcome contributions of all kinds. Whether you want to introduce new features, enhance the infrastructure, improve documentation, raise issues, add more examples, implement state-of-the-art research ideas, or fix bugs, we appreciate your enthusiasm and efforts. 🙌 You are welcome to join our [slack](https://join.slack.com/t/camel-ai/shared_invite/zt-2g7xc41gy-_7rcrNNAArIP6sLQqldkqQ) for more efficient communication. 💬

## Guidelines 📝

Expand Down
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ pip install -e .[all] # (Optional)

## Example

You can find a list of tasks for different set of assistant and user role pairs [here](https://drive.google.com/file/d/194PPaSTBR07m-PzjS-Ty6KlPLdFIPQDd/view?usp=share_link).
You can find a list of tasks for different sets of assistant and user role pairs [here](https://drive.google.com/file/d/194PPaSTBR07m-PzjS-Ty6KlPLdFIPQDd/view?usp=share_link).

As an example, to run the `role_playing.py` script:

Expand Down Expand Up @@ -282,7 +282,7 @@ For more information please contact camel.ai.team@gmail.com.
[colab-image]: https://colab.research.google.com/assets/colab-badge.svg
[huggingface-url]: https://huggingface.co/camel-ai
[huggingface-image]: https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-CAMEL--AI-ffc107?color=ffc107&logoColor=white
[slack-url]: https://join.slack.com/t/camel-kwr1314/shared_invite/zt-1vy8u9lbo-ZQmhIAyWSEfSwLCl2r2eKA
[slack-url]: https://join.slack.com/t/camel-ai/shared_invite/zt-2g7xc41gy-_7rcrNNAArIP6sLQqldkqQ
[slack-image]: https://img.shields.io/badge/Slack-CAMEL--AI-blueviolet?logo=slack
[discord-url]: https://discord.gg/CNcNpquyDc
[discord-image]: https://img.shields.io/badge/Discord-CAMEL--AI-7289da?logo=discord&logoColor=white&color=7289da
Expand Down
54 changes: 54 additions & 0 deletions camel/configs.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@
from dataclasses import asdict, dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union

from anthropic._types import NOT_GIVEN, NotGiven

if TYPE_CHECKING:
from camel.functions import OpenAIFunction

Expand Down Expand Up @@ -211,3 +213,55 @@ class OpenSourceConfig(BaseConfig):
param
for param in asdict(FunctionCallingConfig()).keys()
}


@dataclass(frozen=True)
class AnthropicConfig(BaseConfig):
    r"""Defines the parameters for generating chat completions using the
    Anthropic API.

    See: https://docs.anthropic.com/claude/reference/complete_post

    Args:
        max_tokens (int, optional): The maximum number of tokens to
            generate before stopping. Note that Anthropic models may stop
            before reaching this maximum. This parameter only specifies the
            absolute maximum number of tokens to generate.
            (default: :obj:`256`)
        stop_sequences (List[str], optional): Sequences that will cause the
            model to stop generating completion text. Anthropic models stop
            on "\n\nHuman:", and may include additional built-in stop
            sequences in the future. By providing the stop_sequences
            parameter, you may include additional strings that will cause
            the model to stop generating.
            (default: :obj:`NOT_GIVEN`)
        temperature (float, optional): Amount of randomness injected into the
            response. Ranges from 0 to 1. Use temp closer to 0
            for analytical / multiple choice, and closer to 1 for creative
            and generative tasks.
            (default: :obj:`1`)
        top_p (float, optional): Use nucleus sampling. In nucleus sampling, we
            compute the cumulative distribution over all the options for each
            subsequent token in decreasing probability order and cut it off
            once it reaches a particular probability specified by `top_p`.
            You should either alter `temperature` or `top_p`,
            but not both.
            (default: :obj:`NOT_GIVEN`)
        top_k (int, optional): Only sample from the top K options for each
            subsequent token. Used to remove "long tail" low probability
            responses.
            (default: :obj:`NOT_GIVEN`)
        metadata: An object describing metadata about the request.
            (default: :obj:`NOT_GIVEN`)
        stream (bool, optional): Whether to incrementally stream the response
            using server-sent events.
            (default: :obj:`False`)
    """
    max_tokens: int = 256
    stop_sequences: Union[List[str], NotGiven] = NOT_GIVEN
    temperature: float = 1
    top_p: Union[float, NotGiven] = NOT_GIVEN
    top_k: Union[int, NotGiven] = NOT_GIVEN
    # NOTE(review): the annotation permits only NotGiven, so callers cannot
    # actually supply metadata; presumably this should also accept a dict
    # (e.g. {"user_id": ...}) — confirm against the Anthropic API.
    metadata: NotGiven = NOT_GIVEN
    stream: bool = False


# Set of keyword-argument names accepted by the Anthropic chat API; used by
# AnthropicModel.check_model_config to reject unexpected config entries.
ANTHROPIC_API_PARAMS = {param for param in asdict(AnthropicConfig()).keys()}
4 changes: 2 additions & 2 deletions camel/embeddings/openai_embedding.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

from camel.embeddings import BaseEmbedding
from camel.types import EmbeddingModelType
from camel.utils import openai_api_key_required
from camel.utils import api_key_required


class OpenAIEmbedding(BaseEmbedding[str]):
Expand All @@ -41,7 +41,7 @@ def __init__(
self.output_dim = model_type.output_dim
self.client = OpenAI()

@openai_api_key_required
@api_key_required
def embed_list(
self,
objs: List[str],
Expand Down
2 changes: 2 additions & 0 deletions camel/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,13 @@
from .openai_model import OpenAIModel
from .stub_model import StubModel
from .open_source_model import OpenSourceModel
from .anthropic_model import AnthropicModel
from .model_factory import ModelFactory

__all__ = [
'BaseModelBackend',
'OpenAIModel',
'AnthropicModel',
'StubModel',
'OpenSourceModel',
'ModelFactory',
Expand Down
118 changes: 118 additions & 0 deletions camel/models/anthropic_model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
import os
from typing import Any, Dict, Optional

from anthropic import Anthropic
from anthropic._types import NOT_GIVEN

from camel.configs import ANTHROPIC_API_PARAMS
from camel.models import BaseModelBackend
from camel.types import ChatCompletion, ModelType
from camel.utils import AnthropicTokenCounter, BaseTokenCounter


class AnthropicModel(BaseModelBackend):
    r"""Anthropic API in a unified BaseModelBackend interface."""

    def __init__(self, model_type: ModelType,
                 model_config_dict: Dict[str, Any]) -> None:
        r"""Constructor for the Anthropic model backend.

        Args:
            model_type (ModelType): Model for which a backend is created,
                one of the Claude model types.
            model_config_dict (Dict[str, Any]): A dictionary of parameters
                for the Anthropic API (see :obj:`AnthropicConfig`).

        Raises:
            KeyError: If the ``ANTHROPIC_API_KEY`` environment variable is
                not set.
        """
        super().__init__(model_type, model_config_dict)
        # Fail fast here (rather than on first request) if the key is absent.
        self.client = Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
        self._token_counter: Optional[BaseTokenCounter] = None

    def _convert_response_from_anthropic_to_openai(self, response):
        r"""Repackage an Anthropic message response into the OpenAI
        ``ChatCompletion`` shape so downstream code consumes one format.

        Args:
            response: The raw response from the Anthropic messages API.

        Returns:
            ChatCompletion: The response in OpenAI chat-completion format.
        """
        # openai ^1.0.0 format, reference openai/types/chat/chat_completion.py
        obj = ChatCompletion.construct(
            id=None,
            choices=[
                dict(
                    index=0,
                    message={
                        "role": "assistant",
                        "content": response.content[0].text,
                    },
                    finish_reason=response.stop_reason,
                )
            ],
            created=None,
            model=response.model,
            object="chat.completion",
        )
        return obj

    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Initialize the token counter for the model backend.

        Returns:
            BaseTokenCounter: The token counter following the model's
                tokenization style.
        """
        # Lazily created so construction stays cheap.
        if not self._token_counter:
            self._token_counter = AnthropicTokenCounter(self.model_type)
        return self._token_counter

    def count_tokens_from_prompt(self, prompt: str) -> int:
        r"""Count the number of tokens from a prompt.

        Args:
            prompt (str): The prompt string.

        Returns:
            int: The number of tokens in the prompt.
        """
        return self.client.count_tokens(prompt)

    def run(
        self,
        messages,
    ):
        r"""Run inference of Anthropic chat completion.

        Args:
            messages (List[Dict]): Message list with the chat history
                in OpenAI API format. The input list is not modified.

        Returns:
            Dict[str, Any]: Response in the OpenAI API format.
        """
        # Anthropic takes the system prompt as a dedicated argument rather
        # than as the first message. Split it off without mutating the
        # caller's list (the previous implementation popped it in place),
        # and tolerate an empty message list.
        if messages and messages[0]["role"] == "system":
            sys_msg = messages[0]["content"]  # type: ignore
            messages = messages[1:]
        else:
            sys_msg = NOT_GIVEN
        response = self.client.messages.create(  # type: ignore
            model=self.model_type.value,
            system=sys_msg,
            messages=messages,
            **self.model_config_dict,
        )

        # format response to openai format
        return self._convert_response_from_anthropic_to_openai(response)

    def check_model_config(self):
        r"""Check whether the model configuration is valid for Anthropic
        model backends.

        Raises:
            ValueError: If the model configuration dictionary contains any
                argument not accepted by the Anthropic API (see
                :obj:`ANTHROPIC_API_PARAMS`).
        """
        for param in self.model_config_dict:
            if param not in ANTHROPIC_API_PARAMS:
                raise ValueError(f"Unexpected argument `{param}` is "
                                 "input into Anthropic model backend.")

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode,
        which sends partial results each time.

        Returns:
            bool: Whether the model is in stream mode.
        """
        return self.model_config_dict.get("stream", False)
3 changes: 3 additions & 0 deletions camel/models/model_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from typing import Any, Dict

from camel.models import (
AnthropicModel,
BaseModelBackend,
OpenAIModel,
OpenSourceModel,
Expand Down Expand Up @@ -52,6 +53,8 @@ def create(model_type: ModelType,
model_class = StubModel
elif model_type.is_open_source:
model_class = OpenSourceModel
elif model_type.is_anthropic:
model_class = AnthropicModel
else:
raise ValueError(f"Unknown model type `{model_type}` is input")

Expand Down
8 changes: 2 additions & 6 deletions camel/models/openai_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,7 @@
from camel.messages import OpenAIMessage
from camel.models import BaseModelBackend
from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
from camel.utils import (
BaseTokenCounter,
OpenAITokenCounter,
openai_api_key_required,
)
from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_key_required


class OpenAIModel(BaseModelBackend):
Expand Down Expand Up @@ -57,7 +53,7 @@ def token_counter(self) -> BaseTokenCounter:
self._token_counter = OpenAITokenCounter(self.model_type)
return self._token_counter

@openai_api_key_required
@api_key_required
def run(
self,
messages: List[OpenAIMessage],
Expand Down
36 changes: 36 additions & 0 deletions camel/types/enums.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,17 @@ class ModelType(Enum):
VICUNA = "vicuna"
VICUNA_16K = "vicuna-16k"

# Legacy Anthropic models
# NOTE: among the legacy Anthropic models, only Claude 2.1 supports a system prompt
CLAUDE_2_1 = "claude-2.1"
CLAUDE_2_0 = "claude-2.0"
CLAUDE_INSTANT_1_2 = "claude-instant-1.2"

# 3 models
CLAUDE_3_OPUS = "claude-3-opus-20240229"
CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"

@property
def value_for_tiktoken(self) -> str:
return self.value if self is not ModelType.STUB else "gpt-3.5-turbo"
Expand All @@ -62,6 +73,22 @@ def is_open_source(self) -> bool:
ModelType.VICUNA_16K,
}

@property
def is_anthropic(self) -> bool:
r"""Returns whether this type of models is Anthropic-released model.
Returns:
bool: Whether this type of models is anthropic.
"""
return self in {
ModelType.CLAUDE_INSTANT_1_2,
ModelType.CLAUDE_2_0,
ModelType.CLAUDE_2_1,
ModelType.CLAUDE_3_OPUS,
ModelType.CLAUDE_3_SONNET,
ModelType.CLAUDE_3_HAIKU,
}

@property
def token_limit(self) -> int:
r"""Returns the maximum token limit for a given model.
Expand Down Expand Up @@ -89,6 +116,15 @@ def token_limit(self) -> int:
return 2048
elif self is ModelType.VICUNA_16K:
return 16384
if self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}:
return 100_000
elif self in {
ModelType.CLAUDE_2_1,
ModelType.CLAUDE_3_OPUS,
ModelType.CLAUDE_3_SONNET,
ModelType.CLAUDE_3_HAIKU,
}:
return 200_000
else:
raise ValueError("Unknown model type")

Expand Down
14 changes: 6 additions & 8 deletions camel/utils/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from .commons import (
openai_api_key_required,
api_key_required,
print_text_animated,
get_prompt_template_key_words,
get_first_int,
Expand All @@ -24,21 +24,19 @@
to_pascal,
PYDANTIC_V2,
)
from .token_counting import (
get_model_encoding,
BaseTokenCounter,
OpenAITokenCounter,
OpenSourceTokenCounter,
)
from .token_counting import (get_model_encoding, BaseTokenCounter,
OpenAITokenCounter, OpenSourceTokenCounter,
AnthropicTokenCounter)

__all__ = [
'openai_api_key_required',
'api_key_required',
'print_text_animated',
'get_prompt_template_key_words',
'get_first_int',
'download_tasks',
'get_task_list',
'check_server_running',
'AnthropicTokenCounter',
'get_system_information',
'to_pascal',
'PYDANTIC_V2',
Expand Down

0 comments on commit 3ad4b5a

Please sign in to comment.