diff --git a/.env b/.env
index 8c4a715958..6b84096d88 100644
--- a/.env
+++ b/.env
@@ -50,8 +50,8 @@
 # NVIDIA API (https://build.nvidia.com/explore/discover)
 # NVIDIA_API_KEY="Fill your API key here"
 
-# OpenBB Platform API (https://my.openbb.co/app/credentials)
-# OPENBB_TOKEN="Fill your API key here"
+# InternLM API (https://internlm.intern-ai.org.cn/api/tokens)
+# INTERNLM_API_KEY="Fill your API key here"
 
 #===========================================
 # Tools & Services API
@@ -87,3 +87,6 @@
 
 # Discord Bot API (https://discord.com/developers/applications)
 # DISCORD_BOT_TOKEN="Fill your API key here"
+
+# OpenBB Platform API (https://my.openbb.co/app/credentials)
+# OPENBB_TOKEN="Fill your API key here"
diff --git a/.github/workflows/build_package.yml b/.github/workflows/build_package.yml
index 8d3d9d4a52..e062074a64 100644
--- a/.github/workflows/build_package.yml
+++ b/.github/workflows/build_package.yml
@@ -78,6 +78,7 @@ jobs:
           DEEPSEEK_API_KEY: "${{ secrets.DEEPSEEK_API_KEY }}"
           DAPPIER_API_KEY: "${{ secrets.DAPPIER_API_KEY }}"
           DISCORD_BOT_TOKEN: "${{ secrets.DISCORD_BOT_TOKEN }}"
+          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
         run: |
           source venv/bin/activate
           pytest --fast-test-mode ./test
diff --git a/.github/workflows/pytest_apps.yml b/.github/workflows/pytest_apps.yml
index 63d2cc6e73..e1cf0f0c01 100644
--- a/.github/workflows/pytest_apps.yml
+++ b/.github/workflows/pytest_apps.yml
@@ -29,6 +29,7 @@ jobs:
           GOOGLE_API_KEY: "${{ secrets.GOOGLE_API_KEY }}"
           SEARCH_ENGINE_ID: "${{ secrets.SEARCH_ENGINE_ID }}"
           COHERE_API_KEY: "${{ secrets.COHERE_API_KEY }}"
+          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
         run: poetry run pytest -v apps/
 
   pytest_examples:
@@ -47,4 +48,5 @@
           GOOGLE_API_KEY: "${{ secrets.GOOGLE_API_KEY }}"
           SEARCH_ENGINE_ID: "${{ secrets.SEARCH_ENGINE_ID }}"
           COHERE_API_KEY: "${{ secrets.COHERE_API_KEY }}"
+          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
         run: poetry run pytest -v examples/
diff --git a/.github/workflows/pytest_package.yml b/.github/workflows/pytest_package.yml
index b87e8c1d78..4dd092f659 100644
--- a/.github/workflows/pytest_package.yml
+++ b/.github/workflows/pytest_package.yml
@@ -57,6 +57,7 @@ jobs:
           DEEPSEEK_API_KEY: "${{ secrets.DEEPSEEK_API_KEY }}"
           DAPPIER_API_KEY: "${{ secrets.DAPPIER_API_KEY }}"
           DISCORD_BOT_TOKEN: "${{ secrets.DISCORD_BOT_TOKEN }}"
+          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
         run: poetry run pytest --fast-test-mode test/
 
   pytest_package_llm_test:
@@ -103,6 +104,7 @@
           DEEPSEEK_API_KEY: "${{ secrets.DEEPSEEK_API_KEY }}"
           DAPPIER_API_KEY: "${{ secrets.DAPPIER_API_KEY }}"
           DISCORD_BOT_TOKEN: "${{ secrets.DISCORD_BOT_TOKEN }}"
+          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
         run: poetry run pytest --llm-test-only test/
 
   pytest_package_very_slow_test:
@@ -149,4 +151,5 @@
           DEEPSEEK_API_KEY: "${{ secrets.DEEPSEEK_API_KEY }}"
           DAPPIER_API_KEY: "${{ secrets.DAPPIER_API_KEY }}"
           DISCORD_BOT_TOKEN: "${{ secrets.DISCORD_BOT_TOKEN }}"
+          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
         run: poetry run pytest --very-slow-test-only test/
diff --git a/camel/configs/__init__.py b/camel/configs/__init__.py
index fcd9b7a9c4..2e6b30b3f1 100644
--- a/camel/configs/__init__.py
+++ b/camel/configs/__init__.py
@@ -17,6 +17,7 @@
 from .deepseek_config import DEEPSEEK_API_PARAMS, DeepSeekConfig
 from .gemini_config import Gemini_API_PARAMS, GeminiConfig
 from .groq_config import GROQ_API_PARAMS, GroqConfig
+from .internlm_config import INTERNLM_API_PARAMS, InternLMConfig
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
 from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .nvidia_config import NVIDIA_API_PARAMS, NvidiaConfig
@@ -76,4 +77,6 @@
     'QWEN_API_PARAMS',
     'DeepSeekConfig',
     'DEEPSEEK_API_PARAMS',
+    'InternLMConfig',
+    'INTERNLM_API_PARAMS',
 ]
diff --git a/camel/configs/internlm_config.py b/camel/configs/internlm_config.py
new file mode 100644
index 0000000000..030f5c8ef2
--- /dev/null
+++ b/camel/configs/internlm_config.py
@@ -0,0 +1,60 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+from typing import Optional, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class InternLMConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    InternLM API. You can refer to the following link for more details:
+    https://internlm.intern-ai.org.cn/api/document
+
+    Args:
+        stream (bool, optional): Whether to stream the response.
+            (default: :obj:`False`)
+        temperature (float, optional): Controls the diversity and focus of
+            the generated results. Lower values make the output more focused,
+            while higher values make it more diverse. (default: :obj:`0.8`)
+        top_p (float, optional): Controls the diversity and focus of the
+            generated results. Higher values make the output more diverse,
+            while lower values make it more focused. (default: :obj:`0.9`)
+        max_tokens (Optional[int], optional): The maximum number of tokens
+            the model is allowed to generate. (default: :obj:`None`)
+        tools (list, optional): Specifies an array of tools that the model can
+            call. It can contain one or more tool objects. During a function
+            call process, the model will select one tool from the array.
+            (default: :obj:`None`)
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+ """ + + stream: bool = False + temperature: float = 0.8 + top_p: float = 0.9 + max_tokens: Optional[int] = None + tool_choice: Optional[Union[dict[str, str], str]] = None + + +INTERNLM_API_PARAMS = {param for param in InternLMConfig.model_fields.keys()} diff --git a/camel/models/__init__.py b/camel/models/__init__.py index a80a80d924..6a4adc4c4c 100644 --- a/camel/models/__init__.py +++ b/camel/models/__init__.py @@ -19,6 +19,7 @@ from .fish_audio_model import FishAudioModel from .gemini_model import GeminiModel from .groq_model import GroqModel +from .internlm_model import InternLMModel from .litellm_model import LiteLLMModel from .mistral_model import MistralModel from .model_factory import ModelFactory @@ -68,4 +69,5 @@ 'ModelProcessingError', 'DeepSeekModel', 'FishAudioModel', + 'InternLMModel', ] diff --git a/camel/models/internlm_model.py b/camel/models/internlm_model.py new file mode 100644 index 0000000000..a4a1be2d1d --- /dev/null +++ b/camel/models/internlm_model.py @@ -0,0 +1,143 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= + +import os +from typing import Any, Dict, List, Optional, Union + +from openai import OpenAI, Stream + +from camel.configs import INTERNLM_API_PARAMS, InternLMConfig +from camel.messages import OpenAIMessage +from camel.models import BaseModelBackend +from camel.types import ( + ChatCompletion, + ChatCompletionChunk, + ModelType, +) +from camel.utils import ( + BaseTokenCounter, + OpenAITokenCounter, + api_keys_required, +) + + +class InternLMModel(BaseModelBackend): + r"""InternLM API in a unified BaseModelBackend interface. + + Args: + model_type (Union[ModelType, str]): Model for which a backend is + created, one of InternLM series. + model_config_dict (Optional[Dict[str, Any]], optional): A dictionary + that will be fed into:obj:`openai.ChatCompletion.create()`. If + :obj:`None`, :obj:`InternLMConfig().as_dict()` will be used. + (default: :obj:`None`) + api_key (Optional[str], optional): The API key for authenticating with + the InternLM service. (default: :obj:`None`) + url (Optional[str], optional): The url to the InternLM service. + (default: :obj:`https://internlm-chat.intern-ai.org.cn/puyu/api/v1`) + token_counter (Optional[BaseTokenCounter], optional): Token counter to + use for the model. If not provided, :obj:`OpenAITokenCounter( + ModelType.GPT_4O_MINI)` will be used. 
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", "INTERNLM_API_KEY"),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = InternLMConfig().as_dict()
+        api_key = api_key or os.environ.get("INTERNLM_API_KEY")
+        url = url or os.environ.get(
+            "INTERNLM_API_BASE_URL",
+            "https://internlm-chat.intern-ai.org.cn/puyu/api/v1",
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self._client = OpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of InternLM chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            OpenAITokenCounter: The token counter following the model's
+                tokenization style.
+        """
+
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to InternLM API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to InternLM API.
+        """
+        for param in self.model_config_dict:
+            if param not in INTERNLM_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into InternLM model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+ """ + return self.model_config_dict.get('stream', False) diff --git a/camel/models/model_factory.py b/camel/models/model_factory.py index 309c3dce67..c401ffd0aa 100644 --- a/camel/models/model_factory.py +++ b/camel/models/model_factory.py @@ -20,6 +20,7 @@ from camel.models.deepseek_model import DeepSeekModel from camel.models.gemini_model import GeminiModel from camel.models.groq_model import GroqModel +from camel.models.internlm_model import InternLMModel from camel.models.litellm_model import LiteLLMModel from camel.models.mistral_model import MistralModel from camel.models.nvidia_model import NvidiaModel @@ -124,6 +125,8 @@ def create( model_class = QwenModel elif model_platform.is_deepseek: model_class = DeepSeekModel + elif model_platform.is_internlm and model_type.is_internlm: + model_class = InternLMModel elif model_type == ModelType.STUB: model_class = StubModel diff --git a/camel/types/enums.py b/camel/types/enums.py index 5e2a04474d..d11c2dbefa 100644 --- a/camel/types/enums.py +++ b/camel/types/enums.py @@ -142,6 +142,12 @@ class ModelType(UnifiedModelType, Enum): # DeepSeek models DEEPSEEK_CHAT = "deepseek-chat" + # InternLM models + INTERNLM3_LATEST = "internlm3-latest" + INTERNLM3_8B_INSTRUCT = "internlm3-8b-instruct" + INTERNLM2_5_LATEST = "internlm2.5-latest" + INTERNLM2_PRO_CHAT = "internlm2-pro-chat" + def __str__(self): return self.value @@ -353,6 +359,15 @@ def is_deepseek(self) -> bool: ModelType.DEEPSEEK_CHAT, } + @property + def is_internlm(self) -> bool: + return self in { + ModelType.INTERNLM3_LATEST, + ModelType.INTERNLM3_8B_INSTRUCT, + ModelType.INTERNLM2_5_LATEST, + ModelType.INTERNLM2_PRO_CHAT, + } + @property def token_limit(self) -> int: r"""Returns the maximum token limit for a given model. @@ -411,6 +426,10 @@ def token_limit(self) -> int: ModelType.NVIDIA_MISTRAL_LARGE, ModelType.NVIDIA_MIXTRAL_8X7B, ModelType.QWEN_QWQ_32B, + ModelType.INTERNLM3_8B_INSTRUCT, + ModelType.INTERNLM3_LATEST, + ModelType.INTERNLM2_5_LATEST, + ModelType.INTERNLM2_PRO_CHAT, }: return 32_768 elif self in { @@ -634,6 +653,7 @@ class ModelPlatformType(Enum): NVIDIA = "nvidia" DEEPSEEK = "deepseek" SGLANG = "sglang" + INTERNLM = "internlm" @property def is_openai(self) -> bool: @@ -736,6 +756,11 @@ def is_deepseek(self) -> bool: r"""Returns whether this platform is DeepSeek.""" return self is ModelPlatformType.DEEPSEEK + @property + def is_internlm(self) -> bool: + r"""Returns whether this platform is InternLM.""" + return self is ModelPlatformType.INTERNLM + class AudioModelType(Enum): TTS_1 = "tts-1" diff --git a/camel/types/unified_model_type.py b/camel/types/unified_model_type.py index 631ab623cb..b4027cc6e5 100644 --- a/camel/types/unified_model_type.py +++ b/camel/types/unified_model_type.py @@ -113,6 +113,11 @@ def is_qwen(self) -> bool: r"""Returns whether the model is a Qwen model.""" return True + @property + def is_internlm(self) -> bool: + r"""Returns whether the model is a InternLM model.""" + return True + @property def support_native_structured_output(self) -> bool: r"""Returns whether the model supports native structured output.""" diff --git a/docs/key_modules/models.md b/docs/key_modules/models.md index 683e0968ad..45b20f3067 100644 --- a/docs/key_modules/models.md +++ b/docs/key_modules/models.md @@ -71,6 +71,10 @@ The following table lists currently supported model platforms by CAMEL. 
 | ZhipuAI | glm-4v | Y |
 | ZhipuAI | glm-4 | N |
 | ZhipuAI | glm-3-turbo | N |
+| InternLM | internlm3-latest | N |
+| InternLM | internlm3-8b-instruct | N |
+| InternLM | internlm2.5-latest | N |
+| InternLM | internlm2-pro-chat | N |
 | Reka | reka-core | Y |
 | Reka | reka-flash | Y |
 | Reka | reka-edge | Y |
diff --git a/examples/models/internlm_model_example.py b/examples/models/internlm_model_example.py
new file mode 100644
index 0000000000..13eaa42b56
--- /dev/null
+++ b/examples/models/internlm_model_example.py
@@ -0,0 +1,46 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+from camel.agents import ChatAgent
+from camel.configs import InternLMConfig
+from camel.models import ModelFactory
+from camel.types import ModelPlatformType, ModelType
+
+model = ModelFactory.create(
+    model_platform=ModelPlatformType.INTERNLM,
+    model_type=ModelType.INTERNLM3_LATEST,
+    model_config_dict=InternLMConfig(temperature=0.2).as_dict(),
+)
+
+# Define system message
+sys_msg = "You are a helpful assistant."
+
+# Set agent
+camel_agent = ChatAgent(system_message=sys_msg, model=model)
+
+user_msg = """Say hi to CAMEL AI, one open-source community
+    dedicated to the study of autonomous and communicative agents."""
+
+# Get response information
+response = camel_agent.step(user_msg)
+print(response.msgs[0].content)
+
+'''
+===============================================================================
+Hi CAMEL AI! It's great to meet you. As an open-source community dedicated to
+the study of autonomous and communicative agents, we're excited to collaborate
+and explore the exciting world of AI. Let's work together to advance our
+understanding and applications in this fascinating field.
+===============================================================================
+'''
diff --git a/test/models/test_internlm_model.py b/test/models/test_internlm_model.py
new file mode 100644
index 0000000000..669cfc0a46
--- /dev/null
+++ b/test/models/test_internlm_model.py
@@ -0,0 +1,56 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import re
+
+import pytest
+
+from camel.configs import InternLMConfig
+from camel.models import InternLMModel
+from camel.types import ModelType
+
+
+@pytest.mark.model_backend
+@pytest.mark.parametrize(
+    "model_type",
+    [
+        ModelType.INTERNLM3_8B_INSTRUCT,
+        ModelType.INTERNLM3_LATEST,
+        ModelType.INTERNLM2_5_LATEST,
+        ModelType.INTERNLM2_PRO_CHAT,
+    ],
+)
+def test_internlm_model(model_type: ModelType):
+    model = InternLMModel(model_type)
+    assert model.model_type == model_type
+    assert model.model_config_dict == InternLMConfig().as_dict()
+    assert isinstance(model.model_type.value_for_tiktoken, str)
+    assert isinstance(model.model_type.token_limit, int)
+
+
+@pytest.mark.model_backend
+def test_internlm_model_unexpected_argument():
+    model_type = ModelType.INTERNLM3_LATEST
+    model_config_dict = {"model_path": "internlm-max"}
+
+    with pytest.raises(
+        ValueError,
+        match=re.escape(
+            (
+                "Unexpected argument `model_path` is "
+                "input into InternLM model backend."
+            )
+        ),
+    ):
+        _ = InternLMModel(model_type, model_config_dict)
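
For readers who want to try the new backend in stream mode, which the bundled example does not exercise, here is a minimal sketch that drives `InternLMModel.run()` directly. It is a sketch under stated assumptions: `INTERNLM_API_KEY` must be exported in the environment, the choice of `INTERNLM2_5_LATEST` among the four new `ModelType` members is arbitrary, and the prompt text is illustrative. The OpenAI-format message list and the `Stream[ChatCompletionChunk]` return type follow the `run()` signature added in the patch above.

```python
import os

from camel.configs import InternLMConfig
from camel.models import InternLMModel
from camel.types import ModelType

# Assumes the key is exported, e.g. `export INTERNLM_API_KEY="your-token"`;
# the @api_keys_required decorator raises otherwise.
assert os.environ.get("INTERNLM_API_KEY"), "INTERNLM_API_KEY must be set"

# stream=True makes run() return a Stream[ChatCompletionChunk]
# instead of a single ChatCompletion.
model = InternLMModel(
    model_type=ModelType.INTERNLM2_5_LATEST,
    model_config_dict=InternLMConfig(stream=True, temperature=0.8).as_dict(),
)

# Messages use the OpenAI chat format that run() expects.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Briefly introduce InternLM."},
]

# Print partial deltas as they arrive from the streaming response.
for chunk in model.run(messages):
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()
```

Because `InternLMModel` wraps an OpenAI-compatible endpoint, the chunk objects are the standard `openai` streaming types, so the usual `choices[0].delta.content` access pattern applies unchanged.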