diff --git a/.github/workflows/pytest_package.yml b/.github/workflows/pytest_package.yml index c1e0d899a..8db9440d8 100644 --- a/.github/workflows/pytest_package.yml +++ b/.github/workflows/pytest_package.yml @@ -29,6 +29,8 @@ jobs: OPENWEATHERMAP_API_KEY: "${{ secrets.OPENWEATHERMAP_API_KEY }}" ANTHROPIC_API_KEY: "${{ secrets.ANTHROPIC_API_KEY }}" COHERE_API_KEY: "${{ secrets.COHERE_API_KEY }}" + NVIDIA_API_BASE_URL: "${{ secrets.NVIDIA_API_BASE_URL }}" + NVIDIA_API_KEY: "${{ secrets.NVIDIA_API_KEY }}" run: poetry run pytest --fast-test-mode test/ pytest_package_llm_test: @@ -47,6 +49,8 @@ jobs: OPENWEATHERMAP_API_KEY: "${{ secrets.OPENWEATHERMAP_API_KEY }}" ANTHROPIC_API_KEY: "${{ secrets.ANTHROPIC_API_KEY }}" COHERE_API_KEY: "${{ secrets.COHERE_API_KEY }}" + NVIDIA_API_BASE_URL: "${{ secrets.NVIDIA_API_BASE_URL }}" + NVIDIA_API_KEY: "${{ secrets.NVIDIA_API_KEY }}" run: poetry run pytest --llm-test-only test/ pytest_package_very_slow_test: @@ -65,4 +69,6 @@ jobs: OPENWEATHERMAP_API_KEY: "${{ secrets.OPENWEATHERMAP_API_KEY }}" ANTHROPIC_API_KEY: "${{ secrets.ANTHROPIC_API_KEY }}" COHERE_API_KEY: "${{ secrets.COHERE_API_KEY }}" + NVIDIA_API_BASE_URL: "${{ secrets.NVIDIA_API_BASE_URL }}" + NVIDIA_API_KEY: "${{ secrets.NVIDIA_API_KEY }}" run: poetry run pytest --very-slow-test-only test/ diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index db04856e4..2951c5ac4 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -522,11 +522,19 @@ def handle_batch_response( """ output_messages: List[BaseMessage] = [] for choice in response.choices: + if isinstance(choice.message, list): + # If choice.message is a list, handle accordingly + # It's a check to fit with Nemotron model integration. 
+ content = "".join( + [msg.content for msg in choice.message if msg.content] + ) + else: + content = choice.message.content or "" chat_message = BaseMessage( role_name=self.role_name, role_type=self.role_type, meta_dict=dict(), - content=choice.message.content or "", + content=content, ) output_messages.append(chat_message) finish_reasons = [ diff --git a/camel/models/__init__.py b/camel/models/__init__.py index fbc4f8cc9..48e41ddc9 100644 --- a/camel/models/__init__.py +++ b/camel/models/__init__.py @@ -15,6 +15,7 @@ from .base_model import BaseModelBackend from .litellm_model import LiteLLMModel from .model_factory import ModelFactory +from .nemotron_model import NemotronModel from .ollama_model import OllamaModel from .open_source_model import OpenSourceModel from .openai_audio_models import OpenAIAudioModels @@ -32,5 +33,6 @@ 'ModelFactory', 'LiteLLMModel', 'OpenAIAudioModels', + 'NemotronModel', 'OllamaModel', ] diff --git a/camel/models/nemotron_model.py b/camel/models/nemotron_model.py new file mode 100644 index 000000000..659e0b587 --- /dev/null +++ b/camel/models/nemotron_model.py @@ -0,0 +1,71 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== +import os +from typing import List, Optional + +from openai import OpenAI + +from camel.messages import OpenAIMessage +from camel.types import ChatCompletion, ModelType +from camel.utils import ( + BaseTokenCounter, + model_api_key_required, +) + + +class NemotronModel: + r"""Nemotron model API backend with OpenAI compatibility.""" + + # NOTE: Nemotron model doesn't support additional model config like OpenAI. + + def __init__( + self, + model_type: ModelType, + api_key: Optional[str] = None, + ) -> None: + r"""Constructor for Nvidia backend. + + Args: + model_type (ModelType): Model for which a backend is created. + api_key (Optional[str]): The API key for authenticating with the + Nvidia service. (default: :obj:`None`) + """ + self.model_type = model_type + url = os.environ.get('NVIDIA_API_BASE_URL', None) + self._api_key = api_key or os.environ.get("NVIDIA_API_KEY") + if not url or not self._api_key: + raise ValueError("The NVIDIA API base url and key should be set.") + self._client = OpenAI( + timeout=60, max_retries=3, base_url=url, api_key=self._api_key + ) + self._token_counter: Optional[BaseTokenCounter] = None + + @model_api_key_required + def run( + self, + messages: List[OpenAIMessage], + ) -> ChatCompletion: + r"""Runs inference of chat completion using the OpenAI-compatible Nvidia API. + + Args: + messages (List[OpenAIMessage]): Message list. + + Returns: + ChatCompletion. 
+ """ + response = self._client.chat.completions.create( + messages=messages, + model=self.model_type.value, + ) + return response diff --git a/camel/types/enums.py b/camel/types/enums.py index a51b9bc06..ef841bd3b 100644 --- a/camel/types/enums.py +++ b/camel/types/enums.py @@ -45,11 +45,14 @@ class ModelType(Enum): CLAUDE_2_0 = "claude-2.0" CLAUDE_INSTANT_1_2 = "claude-instant-1.2" - # 3 models + # Claude3 models CLAUDE_3_OPUS = "claude-3-opus-20240229" CLAUDE_3_SONNET = "claude-3-sonnet-20240229" CLAUDE_3_HAIKU = "claude-3-haiku-20240307" + # Nvidia models + NEMOTRON_4_REWARD = "nvidia/nemotron-4-340b-reward" + @property def value_for_tiktoken(self) -> str: return ( @@ -103,6 +106,17 @@ def is_anthropic(self) -> bool: ModelType.CLAUDE_3_HAIKU, } + @property + def is_nvidia(self) -> bool: + r"""Returns whether this type of models is Nvidia-released model. + + Returns: + bool: Whether this type of models is nvidia. + """ + return self in { + ModelType.NEMOTRON_4_REWARD, + } + @property def token_limit(self) -> int: r"""Returns the maximum token limit for a given model. 
@@ -134,7 +148,7 @@ def token_limit(self) -> int: return 2048 elif self is ModelType.VICUNA_16K: return 16384 - if self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}: + elif self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}: return 100_000 elif self in { ModelType.CLAUDE_2_1, @@ -143,6 +157,8 @@ def token_limit(self) -> int: ModelType.CLAUDE_3_HAIKU, }: return 200_000 + elif self is ModelType.NEMOTRON_4_REWARD: + return 4096 else: raise ValueError("Unknown model type") diff --git a/camel/utils/commons.py b/camel/utils/commons.py index 76388d9e6..8597e35c4 100644 --- a/camel/utils/commons.py +++ b/camel/utils/commons.py @@ -62,6 +62,10 @@ def wrapper(self, *args, **kwargs): if not self._api_key and 'ANTHROPIC_API_KEY' not in os.environ: raise ValueError('Anthropic API key not found.') return func(self, *args, **kwargs) + elif self.model_type.is_nvidia: + if not self._api_key and 'NVIDIA_API_KEY' not in os.environ: + raise ValueError('NVIDIA API key not found.') + return func(self, *args, **kwargs) else: raise ValueError('Unsupported model type.') diff --git a/examples/litellm_models/litellm_model_example.py b/examples/models/litellm_model_example.py similarity index 100% rename from examples/litellm_models/litellm_model_example.py rename to examples/models/litellm_model_example.py diff --git a/examples/models/nemotron_model_example.py b/examples/models/nemotron_model_example.py new file mode 100644 index 000000000..1195b03d1 --- /dev/null +++ b/examples/models/nemotron_model_example.py @@ -0,0 +1,48 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== + +from camel.models import NemotronModel +from camel.types import ModelType + +nemotron = NemotronModel(model_type=ModelType.NEMOTRON_4_REWARD) + +message = [ + {"role": "user", "content": "I am going to Paris, what should I see?"}, + { + "role": "assistant", + "content": "Ah, Paris, the City of Light! There are so " + "many amazing things to see and do in this beautiful city ...", + }, +] + +ans = nemotron.run(message) +print(ans) +''' +=============================================================================== +ChatCompletion(id='4668ad22-1dec-4df4-ba92-97ffa5fbd16d', choices=[Choice +(finish_reason='length', index=0, logprobs=ChoiceLogprobs(content= +[ChatCompletionTokenLogprob(token='helpfulness', bytes=None, logprob=1. 
+6171875, top_logprobs=[]), ChatCompletionTokenLogprob(token='correctness', +bytes=None, logprob=1.6484375, top_logprobs=[]), ChatCompletionTokenLogprob +(token='coherence', bytes=None, logprob=3.3125, top_logprobs=[]), +ChatCompletionTokenLogprob(token='complexity', bytes=None, logprob=0.546875, +top_logprobs=[]), ChatCompletionTokenLogprob(token='verbosity', bytes=None, +logprob=0.515625, top_logprobs=[])]), message=[ChatCompletionMessage +(content='helpfulness:1.6171875,correctness:1.6484375,coherence:3.3125, +complexity:0.546875,verbosity:0.515625', role='assistant', function_call=None, +tool_calls=None)])], created=None, model=None, object=None, +system_fingerprint=None, usage=CompletionUsage(completion_tokens=1, +prompt_tokens=78, total_tokens=79)) +=============================================================================== +''' diff --git a/examples/openai_audio_models/openai_audio_models.py b/examples/models/openai_audio_models_example.py similarity index 100% rename from examples/openai_audio_models/openai_audio_models.py rename to examples/models/openai_audio_models_example.py diff --git a/examples/anthropic_models/role_playing_with_claude.py b/examples/models/role_playing_with_claude.py similarity index 100% rename from examples/anthropic_models/role_playing_with_claude.py rename to examples/models/role_playing_with_claude.py diff --git a/examples/open_source_models/role_playing_with_open_source_model.py b/examples/models/role_playing_with_open_source_model.py similarity index 100% rename from examples/open_source_models/role_playing_with_open_source_model.py rename to examples/models/role_playing_with_open_source_model.py diff --git a/examples/zhipuai_models/zhipuai_model.py b/examples/models/zhipuai_model_example.py similarity index 100% rename from examples/zhipuai_models/zhipuai_model.py rename to examples/models/zhipuai_model_example.py diff --git a/examples/test/test_ai_society_example.py b/examples/test/test_ai_society_example.py 
index fe50168a3..8b926525a 100644 --- a/examples/test/test_ai_society_example.py +++ b/examples/test/test_ai_society_example.py @@ -15,7 +15,7 @@ import examples.ai_society.role_playing import examples.function_call.role_playing_with_functions -import examples.open_source_models.role_playing_with_open_source_model +import examples.models.role_playing_with_open_source_model from camel.models import ModelFactory from camel.types import ModelPlatformType, ModelType @@ -42,6 +42,6 @@ def test_role_playing_with_function_example(): def test_role_playing_with_open_source_model(): with patch('time.sleep', return_value=None): - examples.open_source_models.role_playing_with_open_source_model.main( + examples.models.role_playing_with_open_source_model.main( chat_turn_limit=2 )