diff --git a/.env.example b/.env.example
index f4a8b25816..593eb3ed42 100644
--- a/.env.example
+++ b/.env.example
@@ -62,6 +62,11 @@
 # JINA API (https://jina.ai/)
 # JINA_API_KEY="Fill your API key here"
 
+# AIHUBMIX API (https://aihubmix.com/)
+# AIHUBMIX_API_KEY="Fill your API key here"
+# AIHUBMIX_API_BASE_URL="https://aihubmix.com"
+# AIHUBMIX_APP_CODE="Fill your APP Code here"
+
 #===========================================
 # Tools & Services API
 #===========================================
diff --git a/camel/configs/__init__.py b/camel/configs/__init__.py
index d797efedc3..49728a0441 100644
--- a/camel/configs/__init__.py
+++ b/camel/configs/__init__.py
@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .aihubmix_config import AIHUBMIX_API_PARAMS, AihubmixConfig
 from .aiml_config import AIML_API_PARAMS, AIMLConfig
 from .amd_config import AMD_API_PARAMS, AMDConfig
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
@@ -127,4 +128,6 @@
     'QIANFAN_API_PARAMS',
     'CrynuxConfig',
     'CRYNUX_API_PARAMS',
+    'AihubmixConfig',
+    'AIHUBMIX_API_PARAMS',
 ]
diff --git a/camel/configs/aihubmix_config.py b/camel/configs/aihubmix_config.py
new file mode 100644
index 0000000000..63185598f0
--- /dev/null
+++ b/camel/configs/aihubmix_config.py
@@ -0,0 +1,78 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from typing import Any, Dict, List, Optional, Union
+
+from pydantic import BaseModel
+
+
+class AihubmixConfig(BaseModel):
+    r"""Defines the parameters for generating chat responses from the
+    Aihubmix models.
+
+    Args:
+        temperature (Optional[float], optional): Sampling temperature to
+            use, between :obj:`0` and :obj:`2`. Higher values make the
+            output more random, while lower values make it more focused and
+            deterministic. (default: :obj:`0.2`)
+        top_p (Optional[float], optional): An alternative to sampling with
+            temperature, called nucleus sampling, where the model considers
+            the results of the tokens with top_p probability mass.
+            (default: :obj:`1.0`)
+        n (Optional[int], optional): How many completions to generate for
+            each prompt. (default: :obj:`1`)
+        stream (bool, optional): If True, partial message deltas will be
+            sent as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        stop (Optional[Union[str, List[str]]], optional): Up to 4 sequences
+            where the API will stop generating further tokens.
+            (default: :obj:`None`)
+        max_tokens (Optional[int], optional): The maximum number of tokens
+            to generate in the chat completion. (default: :obj:`None`)
+        presence_penalty (Optional[float], optional): Number between -2.0
+            and 2.0. Positive values penalize new tokens based on whether
+            they appear in the text so far. (default: :obj:`0.0`)
+        frequency_penalty (Optional[float], optional): Number between -2.0
+            and 2.0. Positive values penalize new tokens based on their
+            existing frequency in the text so far. (default: :obj:`0.0`)
+        logit_bias (Optional[Dict[str, float]], optional): Modify the
+            likelihood of specified tokens appearing in the completion.
+            (default: :obj:`None`)
+        user (Optional[str], optional): A unique identifier representing
+            your end-user. (default: :obj:`None`)
+        response_format (Optional[Dict[str, str]], optional): An object
+            specifying the format that the model must output.
+            (default: :obj:`None`)
+    """
+
+    temperature: Optional[float] = 0.2
+    top_p: Optional[float] = 1.0
+    n: Optional[int] = 1
+    stream: bool = False
+    stop: Optional[Union[str, List[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = 0.0
+    frequency_penalty: Optional[float] = 0.0
+    logit_bias: Optional[Dict[str, float]] = None
+    user: Optional[str] = None
+    response_format: Optional[Dict[str, str]] = None
+
+    def as_dict(self) -> Dict[str, Any]:
+        r"""Convert the config to a dictionary.
+
+        Returns:
+            Dict[str, Any]: The configuration as a dictionary.
+        """
+        return self.model_dump(exclude_none=True)
+
+
+AIHUBMIX_API_PARAMS = {param for param in AihubmixConfig.model_fields.keys()}
\ No newline at end of file
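Reviewer note: a minimal usage sketch for the new config class, assuming only the `AihubmixConfig` API added above. The printed dict is what `as_dict()` hands to the backend:

```python
# Minimal sketch, assuming only AihubmixConfig from this PR.
from camel.configs import AihubmixConfig

# Claude endpoints accept either temperature or top_p, not both,
# so the example scripts in this PR leave top_p as None.
config = AihubmixConfig(temperature=0.2, top_p=None)

# as_dict() calls model_dump(exclude_none=True), so all None fields
# (top_p, stop, max_tokens, ...) are dropped before the request is built.
print(config.as_dict())
# {'temperature': 0.2, 'n': 1, 'stream': False,
#  'presence_penalty': 0.0, 'frequency_penalty': 0.0}
```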
diff --git a/camel/models/__init__.py b/camel/models/__init__.py
index b34a201e0d..6630d9bfef 100644
--- a/camel/models/__init__.py
+++ b/camel/models/__init__.py
@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .aihubmix_model import AihubmixModel
 from .aiml_model import AIMLModel
 from .amd_model import AMDModel
 from .anthropic_model import AnthropicModel
@@ -106,4 +107,5 @@
     'WatsonXModel',
     'QianfanModel',
     'CrynuxModel',
+    'AihubmixModel',
 ]
diff --git a/camel/models/aihubmix_gemini_model.py b/camel/models/aihubmix_gemini_model.py
new file mode 100644
index 0000000000..63d70fdb44
--- /dev/null
+++ b/camel/models/aihubmix_gemini_model.py
@@ -0,0 +1,323 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+import time
+import uuid
+from typing import Any, Dict, List, Optional, Type, Union, cast
+
+from openai import AsyncStream, Stream
+from pydantic import BaseModel
+
+from camel.configs import GeminiConfig
+from camel.messages import OpenAIMessage
+from camel.models.base_model import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    Choice,
+    ModelType,
+)
+from camel.utils import BaseTokenCounter, api_keys_required
+
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
+
+class AihubmixGeminiModel(BaseModelBackend):
+    r"""AIHUBMIX Gemini API model using Google's native SDK.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of the AIHUBMIX Gemini series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into
+            :obj:`google.genai.types.GenerateContentConfig`. If :obj:`None`,
+            :obj:`GeminiConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the AIHUBMIX service. (default: :obj:`None`)
+        url (Optional[str], optional): The URL of the AIHUBMIX service.
+            (default: :obj:`https://aihubmix.com/gemini`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter
+            to use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds
+            for API calls. If not provided, will fall back to the
+            MODEL_TIMEOUT environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API
+            calls. (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
+ """ + + @api_keys_required( + [ + ("api_key", 'AIHUBMIX_API_KEY'), + ] + ) + def __init__( + self, + model_type: Union[ModelType, str], + model_config_dict: Optional[Dict[str, Any]] = None, + api_key: Optional[str] = None, + url: Optional[str] = None, + token_counter: Optional[BaseTokenCounter] = None, + timeout: Optional[float] = None, + max_retries: int = 3, + **kwargs: Any, + ) -> None: + super().__init__( + model_type=model_type, + model_config_dict=model_config_dict or GeminiConfig().as_dict(), + api_key=api_key, + url=url, + token_counter=token_counter, + timeout=timeout, + max_retries=max_retries, + ) + + self.api_key = api_key or os.environ.get("AIHUBMIX_API_KEY") + self.url = url or os.environ.get( + "GEMINI_API_BASE_URL", "https://aihubmix.com/gemini" + ) + self.timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180)) + + # Initialize the Google genai client + try: + from google import genai + self._client = genai.Client( + api_key=self.api_key, + http_options={"base_url": self.url}, + ) + except ImportError: + raise ImportError( + "Please install google-genai package to use AihubmixGeminiModel: " + "pip install google-genai" + ) + + @observe() + def _run( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of AIHUBMIX Gemini chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + response_format (Optional[Type[BaseModel]]): The format of the + response. + tools (Optional[List[Dict[str, Any]]]): The schema of the tools to + use for the request. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. + """ + from google.genai import types + from google.genai.types import Content, Part + + # Convert OpenAI messages to Google genai format + contents: List[Content] = [] + for msg in messages: + role = msg.get('role', 'user') + content = msg.get('content', '') + + # Convert role mapping to AIHUBMIX supported roles + # AIHUBMIX only supports "user" and "model" roles + if role == 'assistant': + role_str = 'model' + elif role != 'user': + # Default to user role for any other roles (system, etc.) 
+            # The Content constructor accepts the role as a plain string.
+            contents.append(
+                Content(
+                    role=role_str,
+                    parts=[Part.from_text(text=str(content))],
+                )
+            )
+
+        # Prepare the generation config, filtering out parameters that
+        # GenerateContentConfig does not accept (currently `n` and `stream`).
+        supported_params = self.model_config_dict.copy()
+        for param in ('n', 'stream'):
+            supported_params.pop(param, None)
+
+        generation_config = types.GenerateContentConfig(**supported_params)
+
+        # Make the API call. `contents` is cast to Any because
+        # generate_content accepts a wider union of content types.
+        response = self._client.models.generate_content(
+            model=self.model_type,
+            contents=cast(Any, contents),
+            config=generation_config,
+        )
+
+        # Convert the response to the OpenAI format. This is a simplified
+        # conversion that only extracts the text of the first candidate;
+        # a full implementation would map every field.
+        candidate_content = (
+            response.candidates[0].content if response.candidates else None
+        )
+        content_text = ""
+        if candidate_content and candidate_content.parts:
+            first_part = candidate_content.parts[0]
+            content_text = str(first_part.text) if first_part.text else ""
+
+        # Create a proper Choice object for the ChatCompletion
+        from openai.types.chat.chat_completion_message import (
+            ChatCompletionMessage,
+        )
+
+        choice = Choice(
+            index=0,
+            message=ChatCompletionMessage(
+                role="assistant",
+                content=content_text,
+            ),
+            finish_reason="stop",
+        )
+
+        chat_completion = ChatCompletion(
+            id=f"aihubmix-gemini-{uuid.uuid4().hex[:8]}",
+            choices=[choice],
+            created=int(time.time()),
+            model=self.model_type,
+            object="chat.completion",
+        )
+
+        return chat_completion
+
+    @observe()
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of AIHUBMIX Gemini chat completion asynchronously.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat
+                history in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools
+                to use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        # For simplicity this calls the sync version; a full implementation
+        # would use the async client methods.
+        return self._run(messages, response_format, tools)
+    def run(
+        self,
+        messages: List[Dict],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Run the model with the given messages (dict format).
+
+        Args:
+            messages (List[Dict]): List of messages to send to the model.
+            response_format (Optional[Type[BaseModel]], optional): Expected
+                response format. (default: :obj:`None`)
+            tools (Optional[List[Dict[str, Any]]], optional): List of tools
+                to use. (default: :obj:`None`)
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]: Model
+                response.
+        """
+        # Normalize the dicts into the OpenAI message shape
+        openai_messages = [
+            {
+                "role": msg.get('role', 'user'),
+                "content": msg.get('content', ''),
+            }
+            for msg in messages
+        ]
+        return self._run(openai_messages, response_format, tools)
+
+    async def arun(
+        self,
+        messages: List[Dict],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Asynchronously run the model with the given messages
+        (dict format).
+
+        Args:
+            messages (List[Dict]): List of messages to send to the model.
+            response_format (Optional[Type[BaseModel]], optional): Expected
+                response format. (default: :obj:`None`)
+            tools (Optional[List[Dict[str, Any]]], optional): List of tools
+                to use. (default: :obj:`None`)
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: Model
+                response.
+        """
+        openai_messages = [
+            {
+                "role": msg.get('role', 'user'),
+                "content": msg.get('content', ''),
+            }
+            for msg in messages
+        ]
+        return await self._arun(openai_messages, response_format, tools)
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Returns the token counter used by the model.
+
+        Returns:
+            BaseTokenCounter: The token counter.
+        """
+        # Fall back to a default OpenAI token counter; a full implementation
+        # would use a counter specific to Gemini models.
+        if self._token_counter is None:
+            from camel.utils import OpenAITokenCounter
+
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_api_key_or_fail(self) -> None:
+        r"""Check if the API key is valid or fail.
+
+        Raises:
+            ValueError: If the API key is not set or invalid.
+        """
+        if not self.api_key:
+            raise ValueError(
+                "AIHUBMIX API key is required for AihubmixGeminiModel"
+            )
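Reviewer note: a tiny, self-contained mirror of the role conversion implemented in `_run` above. `to_gemini_role` is a hypothetical helper written only for illustration, not part of this patch:

```python
# Hypothetical mirror of AihubmixGeminiModel's role mapping, for review.
# AIHUBMIX's Gemini endpoint only accepts the "user" and "model" roles.
def to_gemini_role(openai_role: str) -> str:
    return 'model' if openai_role == 'assistant' else 'user'

assert to_gemini_role('assistant') == 'model'
assert to_gemini_role('user') == 'user'
# System (and tool) messages are folded in as user turns:
assert to_gemini_role('system') == 'user'
```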
diff --git a/camel/models/aihubmix_model.py b/camel/models/aihubmix_model.py
new file mode 100644
index 0000000000..e8c2c1b6be
--- /dev/null
+++ b/camel/models/aihubmix_model.py
@@ -0,0 +1,228 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, List, Optional, Type, Union
+
+from openai import AsyncStream, Stream
+from pydantic import BaseModel
+
+from camel.configs.aihubmix_config import AihubmixConfig
+from camel.messages import OpenAIMessage
+from camel.models.aihubmix_gemini_model import AihubmixGeminiModel
+from camel.models.anthropic_model import AnthropicModel
+from camel.models.base_model import BaseModelBackend
+from camel.models.gemini_model import GeminiModel
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import BaseTokenCounter, api_keys_required
+
+
+class AihubmixModel(BaseModelBackend):
+    r"""AIHUBMIX provider model backend.
+
+    This model acts as a router to different model backends based on the
+    model type. It automatically selects the appropriate backend depending
+    on the model name prefix.
+
+    Args:
+        model_type (Union[ModelType, str]): Model type; one of the
+            supported AIHUBMIX model names.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into the model. If :obj:`None`,
+            :obj:`AihubmixConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with AIHUBMIX. If not provided, it will be retrieved from the
+            environment variable :obj:`AIHUBMIX_API_KEY`.
+            (default: :obj:`None`)
+        url (Optional[str], optional): The base URL for the AIHUBMIX API.
+            Defaults to :obj:`https://api.aihubmix.com/v1` or can be set
+            via the environment variable :obj:`AIHUBMIX_API_BASE_URL`.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter
+            to use for the model. If not provided, a default counter will
+            be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout for API requests.
+            If not provided, the default timeout is used.
+            (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API
+            calls. (default: :obj:`3`)
+        **kwargs (Any): Additional keyword arguments passed to the model
+            backend.
+ """ + + @api_keys_required([("api_key", "AIHUBMIX_API_KEY")]) + def __init__( + self, + model_type: Union[ModelType, str], + model_config_dict: Optional[Dict[str, Any]] = None, + api_key: Optional[str] = None, + url: Optional[str] = None, + token_counter: Optional[BaseTokenCounter] = None, + timeout: Optional[float] = None, + max_retries: int = 3, + **kwargs: Any, + ) -> None: + if model_config_dict is None: + model_config_dict = AihubmixConfig().as_dict() + super().__init__(model_type, model_config_dict) + + api_key = api_key or os.environ.get("AIHUBMIX_API_KEY") + model_name = str(self.model_type) + + common_args = { + "model_type": self.model_type, + "model_config_dict": self.model_config_dict, + "api_key": api_key, + "token_counter": token_counter, + "timeout": timeout, + "max_retries": max_retries, + **kwargs, + } + + # Route to appropriate backend based on model name + if model_name.startswith("claude"): + base_url = url or os.environ.get( + "AIHUBMIX_API_BASE_URL", "https://api.aihubmix.com/v1" + ) + self.model_backend: Union[ + AnthropicModel, GeminiModel, OpenAICompatibleModel, AihubmixGeminiModel + ] = AnthropicModel(url=base_url, **common_args) + elif ( + model_name.startswith("gemini") + or model_name.startswith("imagen") + ) and not model_name.endswith( + ("-nothink", "-search") + ) and "embedding" not in model_name: + # Use the specialized AihubmixGeminiModel for AIHUBMIX Gemini models + # For AIHUBMIX Gemini models, we should use the specific endpoint + gemini_url = "https://aihubmix.com/gemini" + self.model_backend = AihubmixGeminiModel( + url=gemini_url, **common_args + ) + else: + base_url = url or os.environ.get( + "AIHUBMIX_API_BASE_URL", "https://api.aihubmix.com/v1" + ) + self.model_backend = OpenAICompatibleModel( + url=base_url, **common_args + ) + + def _run( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Run the model with the given messages. + + Args: + messages (List[OpenAIMessage]): List of messages to send to the model. + response_format (Optional[Type[BaseModel]], optional): Expected + response format. (default: :obj:`None`) + tools (Optional[List[Dict[str, Any]]], optional): List of tools to + use. (default: :obj:`None`) + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: Model response. + """ + return self.model_backend._run( + messages=messages, response_format=response_format, tools=tools + ) + + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: + r"""Asynchronously run the model with the given messages. + + Args: + messages (List[OpenAIMessage]): List of messages to send to the model. + response_format (Optional[Type[BaseModel]], optional): Expected + response format. (default: :obj:`None`) + tools (Optional[List[Dict[str, Any]]], optional): List of tools to + use. (default: :obj:`None`) + + Returns: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: Model response. 
+ """ + return await self.model_backend._arun( + messages=messages, response_format=response_format, tools=tools + ) + + def run( + self, + messages: List[Dict], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Run the model with the given messages (dict format). + + Args: + messages (List[Dict]): List of messages to send to the model. + response_format (Optional[Type[BaseModel]], optional): Expected + response format. (default: :obj:`None`) + tools (Optional[List[Dict[str, Any]]], optional): List of tools to + use. (default: :obj:`None`) + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: Model response. + """ + return self.model_backend.run( + messages=messages, response_format=response_format, tools=tools + ) + + async def arun( + self, + messages: List[Dict], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: + r"""Asynchronously run the model with the given messages (dict format). + + Args: + messages (List[Dict]): List of messages to send to the model. + response_format (Optional[Type[BaseModel]], optional): Expected + response format. (default: :obj:`None`) + tools (Optional[List[Dict[str, Any]]], optional): List of tools to + use. (default: :obj:`None`) + + Returns: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: Model response. + """ + return await self.model_backend.arun( + messages=messages, response_format=response_format, tools=tools + ) + + @property + def token_counter(self) -> BaseTokenCounter: + r"""Returns the token counter used by the model backend. + + Returns: + BaseTokenCounter: The token counter. + """ + return self.model_backend.token_counter + + def check_api_key_or_fail(self) -> None: + r"""Check if the API key is valid or fail. + + Raises: + ValueError: If the API key is not set or invalid. 
+ """ + # Fix: Check if the model_backend has the check_api_key_or_fail method before calling it + if hasattr(self.model_backend, 'check_api_key_or_fail'): + self.model_backend.check_api_key_or_fail() diff --git a/camel/models/model_factory.py b/camel/models/model_factory.py index 1669fce901..c065b9cc19 100644 --- a/camel/models/model_factory.py +++ b/camel/models/model_factory.py @@ -15,6 +15,7 @@ import os from typing import Any, ClassVar, Dict, Optional, Type, Union +from camel.models.aihubmix_model import AihubmixModel from camel.models.aiml_model import AIMLModel from camel.models.amd_model import AMDModel from camel.models.anthropic_model import AnthropicModel @@ -107,6 +108,7 @@ class ModelFactory: ModelPlatformType.WATSONX: WatsonXModel, ModelPlatformType.QIANFAN: QianfanModel, ModelPlatformType.CRYNUX: CrynuxModel, + ModelPlatformType.AIHUBMIX: AihubmixModel, } @staticmethod diff --git a/camel/types/enums.py b/camel/types/enums.py index cec536c547..9e6e324a8c 100644 --- a/camel/types/enums.py +++ b/camel/types/enums.py @@ -1083,6 +1083,11 @@ def is_crynux(self) -> bool: ModelType.CRYNUX_NOUS_HERMES_3_LLAMA_3_2_3B, } + @property + def is_aihubmix(self) -> bool: + r"""Returns whether this type of models is an aihubmix model.""" + return self.value.startswith("aihubmix") + @property def is_aiml(self) -> bool: return self in { @@ -1718,6 +1723,7 @@ class ModelPlatformType(Enum): WATSONX = "watsonx" QIANFAN = "qianfan" CRYNUX = "crynux" + AIHUBMIX = "aihubmix" @classmethod def from_name(cls, name): diff --git a/examples/models/aihubmix_model_example.py b/examples/models/aihubmix_model_example.py new file mode 100644 index 0000000000..55a0adbbd4 --- /dev/null +++ b/examples/models/aihubmix_model_example.py @@ -0,0 +1,117 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
diff --git a/examples/models/aihubmix_model_example.py b/examples/models/aihubmix_model_example.py
new file mode 100644
index 0000000000..55a0adbbd4
--- /dev/null
+++ b/examples/models/aihubmix_model_example.py
@@ -0,0 +1,117 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+
+from camel.agents import ChatAgent
+from camel.configs import AihubmixConfig
+from camel.models import ModelFactory
+from camel.models.aihubmix_model import AihubmixModel
+from camel.types import ModelPlatformType
+
+# Check whether the API key is set
+api_key = os.environ.get("AIHUBMIX_API_KEY")
+api_base_url = os.environ.get("AIHUBMIX_API_BASE_URL", "https://aihubmix.com")
+app_code = os.environ.get("AIHUBMIX_APP_CODE", "APP Code")
+
+print(f"AIHUBMIX API Base URL: {api_base_url}")
+print(f"AIHUBMIX APP Code: {app_code}")
+if not api_key:
+    print("Warning: AIHUBMIX_API_KEY environment variable not set.")
+    print("Please set it using: export AIHUBMIX_API_KEY='your-api-key'")
+    # Exit here, or keep going if you only want to inspect the routing
+    # exit(1)
+
+# Create the model configuration using the config class.
+# Note: Claude models accept either temperature or top_p, not both.
+model_config = AihubmixConfig(
+    temperature=0.2,
+    # Explicitly set top_p to None to avoid a conflict with temperature
+    top_p=None,
+)
+
+# Select the model to test
+MODEL_TYPE = "claude-3-7-sonnet"
+
+model = ModelFactory.create(
+    model_platform=ModelPlatformType.AIHUBMIX,
+    model_type=MODEL_TYPE,
+    model_config_dict=model_config.as_dict(),
+    api_key=api_key,  # Explicitly pass the API key
+)
+
+# Create an AihubmixModel instance to demonstrate the routing logic
+aihubmix_model = AihubmixModel(
+    model_type=MODEL_TYPE,
+    model_config_dict=model_config.as_dict(),
+    api_key=api_key,
+    url=api_base_url,
+)
+
+# Output model information
+print(f"Model Platform: {ModelPlatformType.AIHUBMIX}")
+print(f"Model Type: {MODEL_TYPE}")
+
+# Display backend information based on the routing rules
+if MODEL_TYPE.startswith("claude"):
+    print("Routing Rule: Claude models use the Anthropic SDK")
+    print(f"Actual Backend Model: {type(aihubmix_model.model_backend).__name__}")
+    print(f"Backend URL: {aihubmix_model.model_backend._url}")
+elif (
+    MODEL_TYPE.startswith(("gemini", "imagen"))
+    and not MODEL_TYPE.endswith(("-nothink", "-search"))
+    and "embedding" not in MODEL_TYPE
+):
+    print("Routing Rule: Gemini/Imagen models use the Google SDK via AIHUBMIX")
+    print(f"Actual Backend Model: {type(aihubmix_model.model_backend).__name__}")
+    print(f"Backend URL: {getattr(aihubmix_model.model_backend, 'url', 'Not available')}")
+    print("Note: AIHUBMIX Gemini models use Google's native SDK for better compatibility")
+else:
+    print("Routing Rule: Other models use the OpenAI-compatible interface")
+    print(f"Actual Backend Model: {type(aihubmix_model.model_backend).__name__}")
+    print(f"Backend URL: {aihubmix_model.model_backend._url}")
+
+print(f"Temperature: {model_config.temperature}")
+if model_config.top_p is not None:
+    print(f"Top P: {model_config.top_p}")
+
+# Define the system message
+sys_msg = "You are a helpful assistant."
+
+# Set up the agent
+camel_agent = ChatAgent(system_message=sys_msg, model=model)
+
+# Use a simple English message, avoiding special characters
+user_msg = "Hello, this is a test message for CAMEL AI."
+
+print("Sending message to AIHUBMIX model...")
+print("Note: Claude models accept either temperature or top_p, but not both.")
+try:
+    response = camel_agent.step(user_msg)
+    print("Response received:")
+    print(response.msgs[0].content)
+except Exception as e:
+    print(f"Error occurred: {e}")
+    print("Troubleshooting tips:")
+    print("1. Claude models accept either temperature or top_p, but not both")
+    print("2. Check your API key and network connectivity")
+    print("3. Verify the model name is supported on the AIHUBMIX platform")
+    print("4. For Gemini models, ensure the google-genai package is installed")
+    print("5. Check that the model is available on the AIHUBMIX platform")
+    import traceback
+
+    traceback.print_exc()
+
+'''
+===============================================================================
+ Hello CAMEL AI! It's great to meet a community dedicated to the study of
+ autonomous and communicative agents. How can I assist you today?
+===============================================================================
+'''
\ No newline at end of file
diff --git a/examples/models/aihubmix_multi_model_example.py b/examples/models/aihubmix_multi_model_example.py
new file mode 100644
index 0000000000..7d965ac621
--- /dev/null
+++ b/examples/models/aihubmix_multi_model_example.py
@@ -0,0 +1,161 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+import traceback
+from typing import List
+
+from camel.agents import ChatAgent
+from camel.configs import AihubmixConfig
+from camel.models import ModelFactory
+from camel.models.aihubmix_model import AihubmixModel
+from camel.types import ModelPlatformType
+
+
+def test_single_model(
+    model_type: str, api_key: str, api_base_url: str, app_code: str
+):
+    """Test a single model.
+
+    Args:
+        model_type (str): The model type to test.
+        api_key (str): The API key for authentication.
+        api_base_url (str): The base URL for the API.
+        app_code (str): The application code.
+
+    Returns:
+        bool: True if the test was successful, False otherwise.
+ """ + print("=" * 60) + print(f"Testing model: {model_type}") + print("=" * 60) + + # Create model configuration using the config class + # Note: Claude models require either temperature or top_p to be set, not both + model_config = AihubmixConfig( + temperature=0.2, + # Explicitly set top_p to None to avoid conflict with temperature + top_p=None + ) + + try: + model = ModelFactory.create( + model_platform=ModelPlatformType.AIHUBMIX, + model_type=model_type, + model_config_dict=model_config.as_dict(), + api_key=api_key, # Explicitly pass API key + ) + except Exception as e: + print(f"Failed to create model {model_type}: {e}") + return False + + # Show model routing information + # Create AihubmixModel instance to demonstrate routing logic + aihubmix_model = AihubmixModel( + model_type=model_type, + model_config_dict=model_config.as_dict(), + api_key=api_key, + url=api_base_url + ) + + # Output model information + print(f"Model Platform: {ModelPlatformType.AIHUBMIX}") + print(f"Model Type: {model_type}") + + # Display backend information based on routing rules + if model_type.startswith("claude"): + print("Routing Rule: Claude models use Anthropic SDK") + print(f"Actual Backend Model: {type(aihubmix_model.model_backend).__name__}") + print(f"Backend URL: {aihubmix_model.model_backend._url}") + elif (model_type.startswith("gemini") or model_type.startswith("imagen")) and not model_type.endswith(("-nothink", "-search")) and "embedding" not in model_type: + print("Routing Rule: Gemini/Imagen models use Google SDK via AIHUBMIX") + print(f"Actual Backend Model: {type(aihubmix_model.model_backend).__name__}") + print(f"Backend URL: {getattr(aihubmix_model.model_backend, 'url', 'Not available')}") + print("Note: AIHUBMIX Gemini models now use Google's native SDK for better compatibility") + else: + print("Routing Rule: Other models use OpenAI compatible interface") + print(f"Actual Backend Model: {type(aihubmix_model.model_backend).__name__}") + print(f"Backend URL: {aihubmix_model.model_backend._url}") + + print(f"Temperature: {model_config.temperature}") + if model_config.top_p is not None: + print(f"Top P: {model_config.top_p}") + + # Define system message + sys_msg = "You are a helpful assistant." + + # Set agent + camel_agent = ChatAgent(system_message=sys_msg, model=model) + + # Use simple English message, avoiding special characters + user_msg = "Hello, this is a test message for CAMEL AI." + + print("Sending message to AIHUBMIX model...") + print("Note: Claude models require either temperature or top_p, but not both.") + try: + # Get response information + response = camel_agent.step(user_msg) + print("Response received:") + print(response.msgs[0].content) + return True + except Exception as e: + print(f"Error occurred: {e}") + print("Troubleshooting tips:") + print("1. Claude models require either temperature or top_p, but not both") + print("2. Check your API key and network connectivity") + print("3. Verify the model name is supported on AIHUBMIX platform") + print("4. For Gemini models, ensure google-genai package is installed") + print("5. 
+
+
+def main():
+    # Check whether the API key is set
+    api_key = os.environ.get("AIHUBMIX_API_KEY")
+    api_base_url = os.environ.get(
+        "AIHUBMIX_API_BASE_URL", "https://aihubmix.com"
+    )
+    app_code = os.environ.get("AIHUBMIX_APP_CODE", "APP Code")
+
+    print(f"AIHUBMIX API Base URL: {api_base_url}")
+    print(f"AIHUBMIX APP Code: {app_code}")
+    if not api_key:
+        print("Warning: AIHUBMIX_API_KEY environment variable not set.")
+        print("Please set it using: export AIHUBMIX_API_KEY='your-api-key'")
+        return
+
+    # Define the list of models to test
+    models_to_test: List[str] = [
+        "gemini-2.5-pro",
+        "claude-3-5-sonnet",
+        "gpt-5",
+        "glm-4.6",
+    ]
+
+    print(
+        f"Preparing to test {len(models_to_test)} models: "
+        f"{', '.join(models_to_test)}"
+    )
+
+    # Test each model
+    successful_tests = 0
+    for model_type in models_to_test:
+        try:
+            success = test_single_model(
+                model_type, api_key, api_base_url, app_code
+            )
+            if success:
+                successful_tests += 1
+        except Exception as e:
+            print(f"Unexpected error when testing model {model_type}: {e}")
+            traceback.print_exc()
+
+    print("\n" + "=" * 60)
+    print(
+        f"Test summary: {successful_tests}/{len(models_to_test)} "
+        f"models tested successfully"
+    )
+    print("=" * 60)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/test/models/test_aihubmix_model.py b/test/models/test_aihubmix_model.py
new file mode 100644
index 0000000000..edd8f49f5e
--- /dev/null
+++ b/test/models/test_aihubmix_model.py
@@ -0,0 +1,40 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+
+import pytest
+
+from camel.configs import AihubmixConfig
+from camel.models import AihubmixModel
+from camel.types import ModelType
+
+
+@pytest.mark.model_backend
+@pytest.mark.parametrize(
+    "model_type",
+    [
+        ModelType.GPT_4O,
+        ModelType.GPT_4O_MINI,
+        ModelType.CLAUDE_3_5_SONNET,
+        ModelType.CLAUDE_3_5_HAIKU,
+        ModelType.GEMINI_2_0_FLASH,
+        ModelType.GEMINI_1_5_PRO,
+    ],
+)
+def test_aihubmix_model(model_type: ModelType):
+    model = AihubmixModel(model_type)
+    assert model.model_type == model_type
+    assert model.model_config_dict == AihubmixConfig().as_dict()
+    assert isinstance(model.model_type.value_for_tiktoken, str)
+    assert isinstance(model.model_type.token_limit, int)
\ No newline at end of file
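Reviewer note: because `AihubmixModel.__init__` is wrapped in `@api_keys_required`, this test presumably needs `AIHUBMIX_API_KEY` present in the environment even though it never makes a network call. If that is undesirable in CI, a conftest fixture along these lines could stub it. This is a hypothetical sketch, not part of the patch, and it assumes the decorator only checks that the variable is set, not that the key is valid:

```python
# Hypothetical test/models/conftest.py addition, for review discussion only.
import pytest


@pytest.fixture(autouse=True)
def _stub_aihubmix_key(monkeypatch):
    # Lets AihubmixModel(...) construct without a real credential.
    monkeypatch.setenv("AIHUBMIX_API_KEY", "test-key")
```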