
Commit d410d7a
Revert "feat: add Anthropic support (#288)"
This reverts commit 25c8e42.
Appointat committed Apr 21, 2024
1 parent 25c8e42 commit d410d7a
Showing 14 changed files with 385 additions and 790 deletions.
54 changes: 0 additions & 54 deletions camel/configs.py
@@ -17,8 +17,6 @@
from dataclasses import asdict, dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union

from anthropic._types import NOT_GIVEN, NotGiven

if TYPE_CHECKING:
from camel.functions import OpenAIFunction

@@ -213,55 +211,3 @@ class OpenSourceConfig(BaseConfig):
param
for param in asdict(FunctionCallingConfig()).keys()
}


@dataclass(frozen=True)
class AnthropicConfig(BaseConfig):
r"""Defines the parameters for generating chat completions using the
Anthropic API.
See: https://docs.anthropic.com/claude/reference/complete_post
Args:
max_tokens (int, optional): The maximum number of tokens to
generate before stopping. Note that Anthropic models may stop
before reaching this maximum. This parameter only specifies the
absolute maximum number of tokens to generate.
(default: :obj:`256`)
stop_sequences (List[str], optional): Sequences that will cause the
model to stop generating completion text. Anthropic models stop
on "\n\nHuman:", and may include additional built-in stop sequences
in the future. By providing the stop_sequences parameter, you may
include additional strings that will cause the model to stop
generating.
temperature (float, optional): Amount of randomness injected into the
response. Ranges from 0 to 1. Use a temperature closer to 0
for analytical / multiple-choice tasks, and closer to 1 for
creative and generative tasks.
(default: :obj:`1`)
top_p (float, optional): Use nucleus sampling. In nucleus sampling, we
compute the cumulative distribution over all the options for each
subsequent token in decreasing probability order and cut it off
once it reaches a particular probability specified by `top_p`.
You should either alter `temperature` or `top_p`,
but not both.
(default: :obj:`0.7`)
top_k (int, optional): Only sample from the top K options for each
subsequent token. Used to remove "long tail" low probability
responses.
(default: :obj:`5`)
metadata: An object describing metadata about the request.
stream (bool, optional): Whether to incrementally stream the response
using server-sent events.
(default: :obj:`False`)
"""
max_tokens: int = 256
stop_sequences: Union[List[str], NotGiven] = NOT_GIVEN
temperature: float = 1
top_p: Union[float, NotGiven] = NOT_GIVEN
top_k: Union[int, NotGiven] = NOT_GIVEN
metadata: NotGiven = NOT_GIVEN
stream: bool = False


ANTHROPIC_API_PARAMS = {param for param in asdict(AnthropicConfig()).keys()}
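
For context on what this revert removes: below is a sketch of how the deleted AnthropicConfig was typically constructed. The usage pattern is an assumption (only the field names and defaults come from the hunk above), and it runs only against the pre-revert tree with the `anthropic` package installed.

from dataclasses import asdict

from camel.configs import AnthropicConfig  # removed by this commit

config = AnthropicConfig(
    max_tokens=512,                 # absolute cap on generated tokens
    temperature=0.2,                # near 0 for analytical tasks
    stop_sequences=["\n\nHuman:"],  # extra stop strings beyond the built-in one
)

# Fields left at NOT_GIVEN (top_p, top_k, metadata) stay as the sentinel, so
# the Anthropic API applies its own defaults when these kwargs are forwarded.
request_kwargs = asdict(config)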
4 changes: 2 additions & 2 deletions camel/embeddings/openai_embedding.py
@@ -17,7 +17,7 @@

from camel.embeddings import BaseEmbedding
from camel.types import EmbeddingModelType
from camel.utils import api_key_required
from camel.utils import openai_api_key_required


class OpenAIEmbedding(BaseEmbedding[str]):
@@ -41,7 +41,7 @@ def __init__(
self.output_dim = model_type.output_dim
self.client = OpenAI()

@api_key_required
@openai_api_key_required
def embed_list(
self,
objs: List[str],
2 changes: 0 additions & 2 deletions camel/models/__init__.py
@@ -15,13 +15,11 @@
from .openai_model import OpenAIModel
from .stub_model import StubModel
from .open_source_model import OpenSourceModel
from .anthropic_model import AnthropicModel
from .model_factory import ModelFactory

__all__ = [
'BaseModelBackend',
'OpenAIModel',
'AnthropicModel',
'StubModel',
'OpenSourceModel',
'ModelFactory',
118 changes: 0 additions & 118 deletions camel/models/anthropic_model.py

This file was deleted.

3 changes: 0 additions & 3 deletions camel/models/model_factory.py
@@ -14,7 +14,6 @@
from typing import Any, Dict

from camel.models import (
AnthropicModel,
BaseModelBackend,
OpenAIModel,
OpenSourceModel,
@@ -53,8 +52,6 @@ def create(model_type: ModelType,
model_class = StubModel
elif model_type.is_open_source:
model_class = OpenSourceModel
elif model_type.is_anthropic:
model_class = AnthropicModel
else:
raise ValueError(f"Unknown model type `{model_type}` is input")
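
The surviving factory body is a plain type-to-class dispatch; this revert drops the is_anthropic branch. A self-contained sketch of the post-revert pattern follows: the create signature is simplified here, and the is_openai check is an assumption not shown in this hunk.

from camel.models import OpenAIModel, OpenSourceModel, StubModel
from camel.types import ModelType


def select_backend_class(model_type: ModelType):
    # Simplified mirror of ModelFactory.create's dispatch after the revert;
    # the real method also instantiates the class with a model config.
    if model_type == ModelType.STUB:
        return StubModel
    if model_type.is_open_source:
        return OpenSourceModel
    if getattr(model_type, "is_openai", False):  # assumed property
        return OpenAIModel
    raise ValueError(f"Unknown model type `{model_type}` is input")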

8 changes: 6 additions & 2 deletions camel/models/openai_model.py
@@ -20,7 +20,11 @@
from camel.messages import OpenAIMessage
from camel.models import BaseModelBackend
from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_key_required
from camel.utils import (
BaseTokenCounter,
OpenAITokenCounter,
openai_api_key_required,
)


class OpenAIModel(BaseModelBackend):
@@ -53,7 +57,7 @@ def token_counter(self) -> BaseTokenCounter:
self._token_counter = OpenAITokenCounter(self.model_type)
return self._token_counter

@api_key_required
@openai_api_key_required
def run(
self,
messages: List[OpenAIMessage],
36 changes: 0 additions & 36 deletions camel/types/enums.py
@@ -37,17 +37,6 @@ class ModelType(Enum):
VICUNA = "vicuna"
VICUNA_16K = "vicuna-16k"

# Legacy Anthropic models
# NOTE: of the legacy Anthropic models, only Claude 2.1 supports system prompts
CLAUDE_2_1 = "claude-2.1"
CLAUDE_2_0 = "claude-2.0"
CLAUDE_INSTANT_1_2 = "claude-instant-1.2"

# Claude 3 models
CLAUDE_3_OPUS = "claude-3-opus-20240229"
CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"

@property
def value_for_tiktoken(self) -> str:
return self.value if self is not ModelType.STUB else "gpt-3.5-turbo"
@@ -73,22 +62,6 @@ def is_open_source(self) -> bool:
ModelType.VICUNA_16K,
}

@property
def is_anthropic(self) -> bool:
r"""Returns whether this type of models is Anthropic-released model.
Returns:
bool: Whether this type of models is anthropic.
"""
return self in {
ModelType.CLAUDE_INSTANT_1_2,
ModelType.CLAUDE_2_0,
ModelType.CLAUDE_2_1,
ModelType.CLAUDE_3_OPUS,
ModelType.CLAUDE_3_SONNET,
ModelType.CLAUDE_3_HAIKU,
}

@property
def token_limit(self) -> int:
r"""Returns the maximum token limit for a given model.
@@ -116,15 +89,6 @@ def token_limit(self) -> int:
return 2048
elif self is ModelType.VICUNA_16K:
return 16384
elif self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}:
return 100_000
elif self in {
ModelType.CLAUDE_2_1,
ModelType.CLAUDE_3_OPUS,
ModelType.CLAUDE_3_SONNET,
ModelType.CLAUDE_3_HAIKU,
}:
return 200_000
else:
raise ValueError("Unknown model type")

14 changes: 8 additions & 6 deletions camel/utils/__init__.py
@@ -12,7 +12,7 @@
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from .commons import (
api_key_required,
openai_api_key_required,
print_text_animated,
get_prompt_template_key_words,
get_first_int,
@@ -24,19 +24,21 @@
to_pascal,
PYDANTIC_V2,
)
from .token_counting import (get_model_encoding, BaseTokenCounter,
OpenAITokenCounter, OpenSourceTokenCounter,
AnthropicTokenCounter)
from .token_counting import (
get_model_encoding,
BaseTokenCounter,
OpenAITokenCounter,
OpenSourceTokenCounter,
)

__all__ = [
'api_key_required',
'openai_api_key_required',
'print_text_animated',
'get_prompt_template_key_words',
'get_first_int',
'download_tasks',
'get_task_list',
'check_server_running',
'AnthropicTokenCounter',
'get_system_information',
'to_pascal',
'PYDANTIC_V2',
12 changes: 3 additions & 9 deletions camel/utils/commons.py
@@ -46,7 +46,7 @@ def get_lazy_imported_types_module():
return ModelType.GPT_4_TURBO


def api_key_required(func: F) -> F:
def openai_api_key_required(func: F) -> F:
r"""Decorator that checks if the OpenAI API key is available in the
environment variables.
@@ -63,16 +63,10 @@ def api_key_required(func: F) -> F:

@wraps(func)
def wrapper(self, *args, **kwargs):
if self.model_type.is_openai:
if 'OPENAI_API_KEY' not in os.environ:
raise ValueError('OpenAI API key not found.')
return func(self, *args, **kwargs)
elif self.model_type.is_anthropic:
if 'ANTHROPIC_API_KEY' not in os.environ:
raise ValueError('Anthropic API key not found.')
if 'OPENAI_API_KEY' in os.environ:
return func(self, *args, **kwargs)
else:
raise ValueError('Unsupported model type.')
raise ValueError('OpenAI API key not found.')

return cast(F, wrapper)
