From e36211f5cdcd6c6c0649314de9bd03998d5563c0 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Thu, 26 Dec 2024 22:50:12 -0600 Subject: [PATCH 01/28] [WIP] refactor tools in ChatAgent --- camel/agents/chat_agent.py | 658 +++++++--------------- camel/models/anthropic_model.py | 8 +- camel/models/azure_openai_model.py | 7 +- camel/models/base_model.py | 26 +- camel/models/cohere_model.py | 11 +- camel/models/deepseek_model.py | 7 +- camel/models/gemini_model.py | 7 +- camel/models/groq_model.py | 5 +- camel/models/litellm_model.py | 8 +- camel/models/mistral_model.py | 8 +- camel/models/model_manager.py | 10 +- camel/models/nemotron_model.py | 7 +- camel/models/nvidia_model.py | 7 +- camel/models/ollama_model.py | 7 +- camel/models/openai_compatible_model.py | 7 +- camel/models/openai_model.py | 7 +- camel/models/qwen_model.py | 7 +- camel/models/reka_model.py | 10 +- camel/models/samba_model.py | 10 +- camel/models/sglang_model.py | 7 +- camel/models/stub_model.py | 10 +- camel/models/togetherai_model.py | 7 +- camel/models/vllm_model.py | 7 +- camel/models/yi_model.py | 7 +- camel/models/zhipuai_model.py | 7 +- examples/models/nemotron_model_example.py | 2 +- test/agents/test_chat_agent.py | 16 +- test/agents/test_role_playing.py | 4 +- test/models/test_model_factory.py | 2 +- test/models/test_model_manager.py | 8 +- 30 files changed, 385 insertions(+), 509 deletions(-) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 18a05638a4..38f653b266 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -16,7 +16,7 @@ import json import logging import re -import uuid +import textwrap from collections import defaultdict from typing import ( TYPE_CHECKING, @@ -31,7 +31,6 @@ ) from openai.types.chat import ChatCompletionMessageToolCall -from openai.types.chat.chat_completion_message_tool_call import Function from pydantic import BaseModel, ValidationError from camel.agents.base import BaseAgent @@ -49,6 +48,7 @@ ModelProcessingError, ) from camel.responses import ChatAgentResponse +from camel.toolkits import FunctionTool from camel.types import ( ChatCompletion, ChatCompletionChunk, @@ -58,9 +58,10 @@ RoleType, ) from camel.utils import ( - func_string_to_callable, - generate_prompt_for_structured_output, get_model_encoding, +) +from camel.utils.commons import ( + func_string_to_callable, get_pydantic_object_schema, json_to_function_code, ) @@ -69,7 +70,6 @@ from openai import Stream from camel.terminators import ResponseTerminator - from camel.toolkits import FunctionTool logger = logging.getLogger(__name__) @@ -86,6 +86,75 @@ from camel.utils import track_agent +def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str: + r"""Generates a tool prompt based on the provided tool schema list. + + Returns: + str: A string representing the tool prompt. 
+ """ + tool_prompts = [] + + for tool in tool_schema_list: + tool_info = tool["function"] + tool_name = tool_info["name"] + tool_description = tool_info["description"] + tool_json = json.dumps(tool_info, indent=4) + + prompt = ( + f"Use the function '{tool_name}' to '{tool_description}':\n" + f"{tool_json}\n" + ) + tool_prompts.append(prompt) + + tool_prompt_str = "\n".join(tool_prompts) + + final_prompt = textwrap.dedent( + f"""\ + You have access to the following functions: + + {tool_prompt_str} + + If you choose to call a function ONLY reply in the following format with no prefix or suffix: + + {{"example_name": "example_value"}} + + Reminder: + - Function calls MUST follow the specified format, start with + - Required parameters MUST be specified + - Only call one function at a time + - Put the entire function call reply on one line + - If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls. + """ # noqa: E501 + ) + return final_prompt + + +def _parse_tool_response(self, response: str): + r"""Parses the tool response to extract the function name and + arguments. + + Args: + response (str): The response from the model containing the + function call. + + Returns: + Optional[Dict[str, Any]]: The parsed function name and arguments + if found, otherwise :obj:`None`. + """ + function_regex = r"(.*?)" + match = re.search(function_regex, response) + + if match: + function_name, args_string = match.groups() + try: + args = json.loads(args_string) + return {"function": function_name, "arguments": args} + except json.JSONDecodeError as error: + logger.error(f"Error parsing function arguments: {error}") + return None + return None + + class FunctionCallingRecord(BaseModel): r"""Historical records of functions called in the conversation. @@ -146,11 +215,6 @@ class ChatAgent(BaseAgent): tools (Optional[List[Union[FunctionTool, Callable]]], optional): List of available :obj:`FunctionTool` or :obj:`Callable`. (default: :obj:`None`) - external_tools (Optional[List[Union[FunctionTool, Callable]]], - optional): List of external tools (:obj:`FunctionTool` or or - :obj:`Callable`) bind to one chat agent. When these tools are - called, the agent will directly return the request instead of - processing it. (default: :obj:`None`) response_terminators (List[ResponseTerminator], optional): List of :obj:`ResponseTerminator` bind to one chat agent. 
(default: :obj:`None`) @@ -171,229 +235,70 @@ def __init__( token_limit: Optional[int] = None, output_language: Optional[str] = None, tools: Optional[List[Union[FunctionTool, Callable]]] = None, - external_tools: Optional[List[Union[FunctionTool, Callable]]] = None, response_terminators: Optional[List[ResponseTerminator]] = None, scheduling_strategy: str = "round_robin", single_iteration: bool = False, + # TODO: Remove this after refactoring + external_tools: Optional[List[Union[FunctionTool, Callable]]] = None, ) -> None: - # Initialize the system message, converting string to BaseMessage if needed - if isinstance(system_message, str): - system_message = BaseMessage.make_assistant_message( - role_name='Assistant', content=system_message - ) - - self.orig_sys_message: Optional[BaseMessage] = system_message - self._system_message: Optional[BaseMessage] = system_message - self.role_name: str = ( - getattr(system_message, 'role_name', None) or "assistant" - ) - self.role_type: RoleType = ( - getattr(system_message, 'role_type', None) or RoleType.ASSISTANT - ) + # Set up model backend self.model_backend = ModelManager( - model - if model is not None - else ModelFactory.create( - model_platform=ModelPlatformType.DEFAULT, - model_type=ModelType.DEFAULT, + ( + model + if model is not None + else ModelFactory.create( + model_platform=ModelPlatformType.DEFAULT, + model_type=ModelType.DEFAULT, + ) ), scheduling_strategy=scheduling_strategy, ) self.model_type = self.model_backend.model_type - # Initialize tools - self.tools: List[FunctionTool] = ( - self._initialize_tools(tools) if tools else [] - ) - self.external_tools: List[FunctionTool] = ( - self._initialize_tools(external_tools) if external_tools else [] - ) - self.external_tool_names: List[str] = [ - tool.get_function_name() for tool in self.external_tools - ] - self.all_tools = self.tools + self.external_tools or [] - - # Create tool dictionaries and configure backend tools if necessary - self.tool_dict = { - tool.get_function_name(): tool for tool in self.all_tools - } - - # If the user set tools from `ChatAgent`, it will override the - # configured tools in `BaseModelBackend`. - if self.all_tools: - logger.warning( - "Overriding the configured tools in `BaseModelBackend` with the tools from `ChatAgent`." 
- ) - tool_schema_list = [ - tool.get_openai_tool_schema() for tool in self.all_tools - ] - self.model_backend.model_config_dict['tools'] = tool_schema_list - - self.model_token_limit = token_limit or self.model_backend.token_limit + # Set up memory context_creator = ScoreBasedContextCreator( self.model_backend.token_counter, - self.model_token_limit, + token_limit or self.model_backend.token_limit, ) self.memory: AgentMemory = memory or ChatHistoryMemory( context_creator, window_size=message_window_size ) - self.output_language: Optional[str] = output_language - if self.output_language is not None: - self.set_output_language(self.output_language) - - self.terminated: bool = False - self.response_terminators = response_terminators or [] - self.init_messages() - self.tool_prompt_added = False - self.single_iteration = single_iteration - - def _initialize_tools( - self, tools: List[Union[FunctionTool, Callable]] - ) -> List[FunctionTool]: - r"""Helper method to initialize tools as FunctionTool instances.""" - from camel.toolkits import FunctionTool - - func_tools = [] - for tool in tools: - if not isinstance(tool, FunctionTool): - tool = FunctionTool(tool) - func_tools.append(tool) - return func_tools - - def add_tool( - self, tool: Union[FunctionTool, Callable], is_external: bool = False - ) -> None: - r"""Add a tool to the agent, specifying if it's an external tool.""" - # Initialize the tool - initialized_tool = self._initialize_tools([tool]) - - # Update tools or external tools based on is_external flag - if is_external: - self.external_tools = self.external_tools + initialized_tool - self.external_tool_names.extend( - tool.get_function_name() for tool in initialized_tool + # Set up system message and initialize messages + self._system_message = ( + BaseMessage.make_assistant_message( + role_name="Assistant", content=system_message ) - else: - self.tools = self.tools + initialized_tool - - # Rebuild all_tools, and tool_dict - self.all_tools = self.tools + self.external_tools - self.tool_dict = { - tool.get_function_name(): tool for tool in self.all_tools - } - - tool_schema_list = [ - tool.get_openai_tool_schema() for tool in self.all_tools - ] - self.model_backend.model_config_dict['tools'] = tool_schema_list - - def remove_tool(self, tool_name: str, is_external: bool = False) -> bool: - r"""Remove a tool by name, specifying if it's an external tool.""" - tool_list = self.external_tools if is_external else self.tools - if not tool_list: - return False - - for tool in tool_list: - if tool.get_function_name() == tool_name: - tool_list.remove(tool) - if is_external: - self.external_tool_names.remove(tool_name) - # Reinitialize the tool dictionary - self.all_tools = (self.tools or []) + ( - self.external_tools or [] - ) - self.tool_dict = { - tool.get_function_name(): tool for tool in self.all_tools - } - tool_schema_list = [ - tool.get_openai_tool_schema() for tool in self.all_tools - ] - self.model_backend.model_config_dict['tools'] = ( - tool_schema_list - ) - return True - return False - - def list_tools(self) -> dict: - r"""List all tools, separated into normal and external tools.""" - normal_tools = [ - tool.get_function_name() for tool in (self.tools or []) - ] - external_tools = [ - tool.get_function_name() for tool in (self.external_tools or []) - ] - - return {"normal_tools": normal_tools, "external_tools": external_tools} - - # ruff: noqa: E501 - def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str: - r"""Generates a tool prompt based on the provided tool schema 
list. - - Args: - tool_schema_list (List[Dict]): A list of dictionaries, each - containing a tool schema. - - Returns: - str: A string representing the tool prompt. - """ - tool_prompts = [] - - for tool in tool_schema_list: - tool_info = tool['function'] - tool_name = tool_info['name'] - tool_description = tool_info['description'] - tool_json = json.dumps(tool_info, indent=4) - - prompt = f"Use the function '{tool_name}' to '{tool_description}':\n{tool_json}\n" - tool_prompts.append(prompt) - - tool_prompt_str = "\n".join(tool_prompts) - - final_prompt = f""" - You have access to the following functions: - - {tool_prompt_str} - - If you choose to call a function ONLY reply in the following format with no - prefix or suffix: - - {{"example_name": "example_value"}} - - Reminder: - - Function calls MUST follow the specified format, start with - - Required parameters MUST be specified - - Only call one function at a time - - Put the entire function call reply on one line - - If there is no function call available, answer the question like normal - with your current knowledge and do not tell the user about function calls - """ - return final_prompt - - def _parse_tool_response(self, response: str): - r"""Parses the tool response to extract the function name and - arguments. + if isinstance(system_message, str) + else system_message + ) + self._output_language = output_language + self._update_system_message_for_output_language() + self.init_messages() - Args: - response (str): The response from the model containing the - function call. + # Set up role name and role type + self.role_name: str = ( + getattr(self.system_message, "role_name", None) or "assistant" + ) + self.role_type: RoleType = ( + getattr(self.system_message, "role_type", None) + or RoleType.ASSISTANT + ) - Returns: - Optional[Dict[str, Any]]: The parsed function name and arguments - if found, otherwise :obj:`None`. - """ - function_regex = r"(.*?)" - match = re.search(function_regex, response) + # Set up tools + self._tools = ( + [ + tool if isinstance(tool, FunctionTool) else FunctionTool(tool) + for tool in tools + ] + if tools + else [] + ) - if match: - function_name, args_string = match.groups() - try: - args = json.loads(args_string) - return {"function": function_name, "arguments": args} - except json.JSONDecodeError as error: - logger.error(f"Error parsing function arguments: {error}") - return None - return None + # Set up other properties + self.terminated = False + self.response_terminators = response_terminators or [] + self.single_iteration = single_iteration def reset(self): r"""Resets the :obj:`ChatAgent` to its initial state.""" @@ -404,32 +309,49 @@ def reset(self): @property def system_message(self) -> Optional[BaseMessage]: - r"""The getter method for the property :obj:`system_message`. - - Returns: - Optional[BaseMessage]: The system message of this agent if set, - else :obj:`None`. - """ return self._system_message - @system_message.setter - def system_message(self, message: BaseMessage) -> None: - r"""The setter method for the property :obj:`system_message`. + @property + def tool_dict(self) -> Dict[str, FunctionTool]: + return {tool.get_function_name(): tool for tool in self._tools} - Args: - message (BaseMessage): The message to be set as the - new system message of this agent. 
+ @property + def tool_list(self) -> List[str]: + return [tool.get_function_name() for tool in self._tools] + + @property + def tool_schemas(self) -> List[Dict]: + return [tool.get_openai_tool_schema() for tool in self._tools] + + def add_tool(self, tool: Union[FunctionTool, Callable]) -> None: + r"""Add a tool to the agent.""" + new_tool = ( + tool if isinstance(tool, FunctionTool) else FunctionTool(tool) + ) + self._tools.append(new_tool) + + def remove_tool(self, tool_name: str) -> bool: + r"""Remove a tool from the agent by name. + + Returns: + bool: Whether the tool was successfully removed. """ - self._system_message = message + for tool in self._tools: + if tool.get_function_name() != tool_name: + continue + self._tools.remove(tool) + return True + return False - def is_tools_added(self) -> bool: + @property + def has_tools(self) -> bool: r"""Whether tool calling is enabled for this agent. Returns: bool: Whether tool calling is enabled for this agent, determined by whether the dictionary of tools is empty. """ - return len(self.tool_dict) > 0 + return len(self._tools) > 0 def update_memory( self, message: BaseMessage, role: OpenAIBackendRole @@ -445,26 +367,18 @@ def update_memory( MemoryRecord(message=message, role_at_backend=role) ) - def set_output_language(self, output_language: str) -> BaseMessage: - r"""Sets the output language for the system message. This method - updates the output language for the system message. The output + def _update_system_message_for_output_language(self) -> None: + r"""Updates the output language for the system message. The output language determines the language in which the output text should be generated. - - Args: - output_language (str): The desired output language. - - Returns: - BaseMessage: The updated system message object. """ - self.output_language = output_language language_prompt = ( "\nRegardless of the input language, " - f"you must output text in {output_language}." + f"you must output text in {self._output_language}." ) - if self.orig_sys_message is not None: - content = self.orig_sys_message.content + language_prompt - self._system_message = self.orig_sys_message.create_new_instance( + if self._system_message is not None: + content = self._system_message.content + language_prompt + self._system_message = self._system_message.create_new_instance( content ) else: @@ -473,14 +387,6 @@ def set_output_language(self, output_language: str) -> BaseMessage: content=language_prompt, ) - system_record = MemoryRecord( - message=self._system_message, - role_at_backend=OpenAIBackendRole.SYSTEM, - ) - self.memory.clear() - self.memory.write_record(system_record) - return self._system_message - def get_info( self, session_id: Optional[str], @@ -488,7 +394,6 @@ def get_info( termination_reasons: List[str], num_tokens: int, tool_calls: List[FunctionCallingRecord], - external_tool_request: Optional[ChatCompletionMessageToolCall] = None, ) -> Dict[str, Any]: r"""Returns a dictionary containing information about the chat session. @@ -501,12 +406,6 @@ def get_info( num_tokens (int): The number of tokens used in the chat session. tool_calls (List[FunctionCallingRecord]): The list of function calling records, containing the information of called tools. - external_tool_request - (Optional[ChatCompletionMessageToolCall], optional): - The tool calling request of external tools from the model. - These requests are directly returned to the user instead of - being processed by the agent automatically. 
- (default: :obj:`None`) Returns: Dict[str, Any]: The chat session information. @@ -517,22 +416,15 @@ def get_info( "termination_reasons": termination_reasons, "num_tokens": num_tokens, "tool_calls": tool_calls, - "external_tool_request": external_tool_request, } def init_messages(self) -> None: r"""Initializes the stored messages list with the current system message. """ - if self._system_message is not None: - system_record = MemoryRecord( - message=self._system_message, - role_at_backend=OpenAIBackendRole.SYSTEM, - ) - self.memory.clear() - self.memory.write_record(system_record) - else: - self.memory.clear() + self.memory.clear() + if self.system_message is not None: + self.update_memory(self.system_message, OpenAIBackendRole.SYSTEM) def record_message(self, message: BaseMessage) -> None: r"""Records the externally provided message into the agent memory as if @@ -567,100 +459,47 @@ def step( flag, and session information. """ - if ( - self.model_backend.model_config_dict.get("response_format") - and response_format - ): - raise ValueError( - "The `response_format` parameter cannot be set both in " - "the model configuration and in the ChatAgent step." - ) - - self.original_model_dict = self.model_backend.model_config_dict - if response_format and self.model_type in {"gpt-4o", "gpt-4o-mini"}: - self.model_backend.model_config_dict = ( - self.original_model_dict.copy() - ) - self.model_backend.model_config_dict["response_format"] = ( - response_format - ) - # Convert input message to BaseMessage if necessary - if isinstance(input_message, str): - input_message = BaseMessage.make_user_message( - role_name='User', content=input_message + input_message = ( + BaseMessage.make_user_message( + role_name="User", content=input_message ) - - # Handle tool prompt injection if needed - if ( - self.is_tools_added() - and not self.model_type.support_native_tool_calling - and not self.tool_prompt_added - ): - self._inject_tool_prompt() + if isinstance(input_message, str) + else input_message + ) # Add user input to memory self.update_memory(input_message, OpenAIBackendRole.USER) - return self._handle_step(response_format, self.single_iteration) + return self._handle_step(response_format) - def _inject_tool_prompt(self) -> None: - r"""Generate and add the tool prompt to memory.""" - tool_prompt = self._generate_tool_prompt( - self.model_backend.model_config_dict["tools"] - ) - tool_msg = BaseMessage.make_assistant_message( - role_name="Assistant", content=tool_prompt - ) - self.update_memory(tool_msg, OpenAIBackendRole.SYSTEM) - self.tool_prompt_added = True + # def _inject_tool_prompt(self) -> None: + # r"""Generate and add the tool prompt to memory.""" + # tool_prompt = self._generate_tool_prompt( + # self.model_backend.model_config_dict["tools"] + # ) + # tool_msg = BaseMessage.make_assistant_message( + # role_name="Assistant", content=tool_prompt + # ) + # self.update_memory(tool_msg, OpenAIBackendRole.SYSTEM) + # self.tool_prompt_added = True def _handle_step( self, response_format: Optional[Type[BaseModel]], - single_step: bool, ) -> ChatAgentResponse: r"""Handles a single or multi-step interaction.""" - - if ( - self.model_backend.model_config_dict.get("tool_choice") - == "required" - and not single_step - ): - raise ValueError( - "`tool_choice` cannot be set to `required` for multi-step" - " mode. To proceed, set `single_iteration` to `True`." 
- ) - # Record function calls made during the session tool_call_records: List[FunctionCallingRecord] = [] - external_tool_request = None - while True: try: openai_messages, num_tokens = self.memory.get_context() except RuntimeError as e: - self.model_backend.model_config_dict = self.original_model_dict return self._step_token_exceed( e.args[1], tool_call_records, "max_tokens_exceeded" ) - # Prompt engineering approach for structured output for non-native tool calling models - inject_prompt_for_structured_output = ( - response_format - and not self.model_type.support_native_structured_output - ) - - if inject_prompt_for_structured_output: - # update last openai message - usr_msg = openai_messages.pop() - usr_msg["content"] = generate_prompt_for_structured_output( - response_format, - usr_msg["content"], # type: ignore [arg-type] - ) - openai_messages.append(usr_msg) - # Process model response ( response, @@ -671,9 +510,7 @@ def _handle_step( ) = self._step_model_response(openai_messages, num_tokens) # Try to parse structured output to return a Pydantic object - if inject_prompt_for_structured_output and isinstance( - response, ChatCompletion - ): + if response_format and isinstance(response, ChatCompletion): content = response.choices[0].message.content try: json_content = json.loads(str(content)) @@ -691,8 +528,8 @@ def _handle_step( ) output_messages[0].parsed = json_content - # Finalize on standard response in multi-step mode - if self._is_standard_response(response): + # Single-step mode + if self.single_iteration: break # Handle tool requests @@ -703,51 +540,6 @@ def _handle_step( self._step_tool_call_and_update(response) ) - if tool_request.function.name in self.external_tool_names: - external_tool_request = tool_request - info = self._step_get_info( - output_messages, - finish_reasons, - usage_dict, - response_id, - tool_call_records, - num_tokens, - tool_request, - ) - self._log_final_output(output_messages) - self.model_backend.model_config_dict = ( - self.original_model_dict - ) - return ChatAgentResponse( - msgs=output_messages, - terminated=self.terminated, - info=info, - ) - - # Single-step mode ends after one iteration - if single_step: - break - - # Optional structured output via function calling - if ( - response_format - and not inject_prompt_for_structured_output - and self.model_type - not in { - "gpt-4o", - "gpt-4o-mini", - } - ): - ( - output_messages, - finish_reasons, - usage_dict, - response_id, - tool_call, - num_tokens, - ) = self._structure_output_with_function(response_format) - tool_call_records.append(tool_call) - # Final info and response info = self._step_get_info( output_messages, @@ -756,10 +548,9 @@ def _handle_step( response_id, tool_call_records, num_tokens, - external_tool_request, ) + self._log_final_output(output_messages) - self.model_backend.model_config_dict = self.original_model_dict return ChatAgentResponse( msgs=output_messages, terminated=self.terminated, info=info ) @@ -776,32 +567,32 @@ def _extract_tool_call( Optional[ChatCompletionMessageToolCall]: The parsed tool call if present, otherwise None. 
""" - # Check if the response contains tool calls - if ( - self.is_tools_added() - and not self.model_type.support_native_tool_calling - and "" in response.choices[0].message.content - ): - parsed_content = self._parse_tool_response( - response.choices[0].message.content - ) - if parsed_content: - return ChatCompletionMessageToolCall( - id=str(uuid.uuid4()), - function=Function( - arguments=str(parsed_content["arguments"]).replace( - "'", '"' - ), - name=str(parsed_content["function"]), - ), - type="function", - ) - elif ( - self.is_tools_added() - and self.model_type.support_native_tool_calling - and response.choices[0].message.tool_calls - ): - return response.choices[0].message.tool_calls[0] + # # Check if the response contains tool calls + # if ( + # self.has_tools + # and not self.model_type.support_native_tool_calling + # and "" in response.choices[0].message.content + # ): + # parsed_content = self._parse_tool_response( + # response.choices[0].message.content + # ) + # if parsed_content: + # return ChatCompletionMessageToolCall( + # id=str(uuid.uuid4()), + # function=Function( + # arguments=str(parsed_content["arguments"]).replace( + # "'", '"' + # ), + # name=str(parsed_content["function"]), + # ), + # type="function", + # ) + # elif ( + # self.has_tools + # and self.model_type.support_native_tool_calling + # and response.choices[0].message.tool_calls + # ): + # return response.choices[0].message.tool_calls[0] # No tool call found return None @@ -817,7 +608,7 @@ def _is_standard_response(self, response: Any) -> bool: bool: `True` if the response is a standard reply, `False` otherwise. """ - if not self.is_tools_added(): + if not self.has_tools: return True if not isinstance(response, ChatCompletion): @@ -868,7 +659,7 @@ async def step_async( """ if isinstance(input_message, str): input_message = BaseMessage.make_user_message( - role_name='User', content=input_message + role_name="User", content=input_message ) self.update_memory(input_message, OpenAIBackendRole.USER) @@ -891,29 +682,12 @@ async def step_async( ) = self._step_model_response(openai_messages, num_tokens) if ( - not self.is_tools_added() + not self.has_tools or not isinstance(response, ChatCompletion) or not response.choices[0].message.tool_calls ): break - # Check for external tool call - external_tool_request = response.choices[0].message.tool_calls[0] - if external_tool_request.function.name in self.external_tool_names: - # if model calls an external tool, directly return the request - info = self._step_get_info( - output_messages, - finish_reasons, - usage_dict, - response_id, - tool_call_records, - num_tokens, - external_tool_request, - ) - return ChatAgentResponse( - msgs=output_messages, terminated=self.terminated, info=info - ) - # Normal function calling tool_call_records.append( await self._step_tool_call_and_update_async(response) @@ -996,6 +770,7 @@ async def _step_tool_call_and_update_async( return func_record + # TODO: Simplify format -> Schema def _structure_output_with_function( self, response_format: Type[BaseModel] ) -> Tuple[ @@ -1030,7 +805,7 @@ def _structure_output_with_function( original_model_dict = self.model_backend.model_config_dict # Replace the original tools with the structuring function - self.tool_dict = {func.get_function_name(): func} + self._tools = [func] self.model_backend.model_config_dict = original_model_dict.copy() self.model_backend.model_config_dict["tools"] = [ func.get_openai_tool_schema() @@ -1131,7 +906,6 @@ def _step_get_info( response_id: str, tool_calls: 
List[FunctionCallingRecord], num_tokens: int, - external_tool_request: Optional[ChatCompletionMessageToolCall] = None, ) -> Dict[str, Any]: r"""Process the output of a chat step and gather information about the step. @@ -1151,9 +925,6 @@ def _step_get_info( tool_calls (List[FunctionCallingRecord]): Records of function calls made during this step. num_tokens (int): The number of tokens used in this step. - external_tool_request (Optional[ChatCompletionMessageToolCall]): - Any external tool request made during this step. - (default: :obj:`None`) Returns: Dict[str, Any]: A dictionary containing information about the chat @@ -1189,7 +960,6 @@ def _step_get_info( finish_reasons, num_tokens, tool_calls, - external_tool_request, ) return info @@ -1213,9 +983,9 @@ def handle_batch_response( role_type=self.role_type, meta_dict=dict(), content=choice.message.content or "", - parsed=getattr(choice.message, 'parsed', None), + parsed=getattr(choice.message, "parsed", None), ) - # Process log probabilities and append to the message meta information + # Process log probabilities, append to the message meta information if choice.logprobs is not None: tokens_logprobs = choice.logprobs.content @@ -1267,10 +1037,10 @@ def _safe_model_dump(self, obj) -> dict: dict: A dictionary representation of the Pydantic model. """ # Check if the `model_dump` method exists (Pydantic v2) - if hasattr(obj, 'model_dump'): + if hasattr(obj, "model_dump"): return obj.model_dump() # Fallback to `dict()` method (Pydantic v1) - elif hasattr(obj, 'dict'): + elif hasattr(obj, "dict"): return obj.dict() else: raise TypeError("The object is not a Pydantic model") diff --git a/camel/models/anthropic_model.py b/camel/models/anthropic_model.py index 0b1e1827e9..12bdd61435 100644 --- a/camel/models/anthropic_model.py +++ b/camel/models/anthropic_model.py @@ -12,7 +12,9 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os -from typing import Any, Dict, List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Type, Union + +from pydantic import BaseModel from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig from camel.messages import OpenAIMessage @@ -119,9 +121,11 @@ def count_tokens_from_prompt( ).input_tokens @api_keys_required("ANTHROPIC_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ): r"""Run inference of Anthropic chat completion. diff --git a/camel/models/azure_openai_model.py b/camel/models/azure_openai_model.py index afe9a63208..9f6a88b9a9 100644 --- a/camel/models/azure_openai_model.py +++ b/camel/models/azure_openai_model.py @@ -12,9 +12,10 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= import os -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import AzureOpenAI, Stream +from pydantic import BaseModel from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig from camel.messages import OpenAIMessage @@ -108,9 +109,11 @@ def token_counter(self) -> BaseTokenCounter: return self._token_counter @api_keys_required("AZURE_OPENAI_API_KEY", "AZURE_API_VERSION") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Azure OpenAI chat completion. diff --git a/camel/models/base_model.py b/camel/models/base_model.py index 37e9d40616..5f7d5e10dc 100644 --- a/camel/models/base_model.py +++ b/camel/models/base_model.py @@ -12,9 +12,10 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= from abc import ABC, abstractmethod -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import Stream +from pydantic import BaseModel from camel.messages import OpenAIMessage from camel.types import ( @@ -74,22 +75,43 @@ def token_counter(self) -> BaseTokenCounter: pass @abstractmethod + def _run( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + pass + def run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs the query to the backend model. Args: messages (List[OpenAIMessage]): Message list with the chat history in OpenAI API format. + response_format (Optional[Type[BaseModel]]): The response format + to use for the model. (default: :obj:`None`) + tools (Optional[List[Tool]]): The schema of tools to use for the + model for this request. Will override the tools specified in + the model configuration (but not change the configuration). + (default: :obj:`None`) Returns: Union[ChatCompletion, Stream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. 
""" - pass + response_format = ( + self.model_config_dict.get("response_format", None) + or response_format + ) + tools = self.model_config_dict.get("tools", None) or tools + return self._run(messages, response_format, tools) @abstractmethod def check_model_config(self): diff --git a/camel/models/cohere_model.py b/camel/models/cohere_model.py index 8376f42f7e..f3d7cc2794 100644 --- a/camel/models/cohere_model.py +++ b/camel/models/cohere_model.py @@ -16,7 +16,9 @@ import logging import os import uuid -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union + +from pydantic import BaseModel if TYPE_CHECKING: from cohere.types import ChatMessageV2, ChatResponse @@ -211,7 +213,12 @@ def token_counter(self) -> BaseTokenCounter: return self._token_counter @api_keys_required("COHERE_API_KEY") - def run(self, messages: List[OpenAIMessage]) -> ChatCompletion: + def _run( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, + ) -> ChatCompletion: r"""Runs inference of Cohere chat completion. Args: diff --git a/camel/models/deepseek_model.py b/camel/models/deepseek_model.py index bfa6483cde..dac353be7f 100644 --- a/camel/models/deepseek_model.py +++ b/camel/models/deepseek_model.py @@ -13,9 +13,10 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig from camel.messages import OpenAIMessage @@ -91,9 +92,11 @@ def token_counter(self) -> BaseTokenCounter: return self._token_counter @api_keys_required("DEEPSEEK_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of DeepSeek chat completion. diff --git a/camel/models/gemini_model.py b/camel/models/gemini_model.py index bc255bd1a3..89530dac12 100644 --- a/camel/models/gemini_model.py +++ b/camel/models/gemini_model.py @@ -12,9 +12,10 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import Gemini_API_PARAMS, GeminiConfig from camel.messages import OpenAIMessage @@ -78,9 +79,11 @@ def __init__( ) @api_keys_required("GEMINI_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Gemini chat completion. 
diff --git a/camel/models/groq_model.py b/camel/models/groq_model.py index 7eb0a477fd..90cf742895 100644 --- a/camel/models/groq_model.py +++ b/camel/models/groq_model.py @@ -15,6 +15,7 @@ from typing import Any, Dict, List, Optional, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import GROQ_API_PARAMS, GroqConfig from camel.messages import OpenAIMessage @@ -90,9 +91,11 @@ def token_counter(self) -> BaseTokenCounter: return self._token_counter @api_keys_required("GROQ_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/litellm_model.py b/camel/models/litellm_model.py index e06feab66b..a82c388700 100644 --- a/camel/models/litellm_model.py +++ b/camel/models/litellm_model.py @@ -11,7 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union + +from pydantic import BaseModel from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig from camel.messages import OpenAIMessage @@ -106,9 +108,11 @@ def token_counter(self) -> BaseTokenCounter: self._token_counter = LiteLLMTokenCounter(self.model_type) return self._token_counter - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> ChatCompletion: r"""Runs inference of LiteLLM chat completion. diff --git a/camel/models/mistral_model.py b/camel/models/mistral_model.py index d95aa992b2..3cde5f5b9a 100644 --- a/camel/models/mistral_model.py +++ b/camel/models/mistral_model.py @@ -12,7 +12,9 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union + +from pydantic import BaseModel if TYPE_CHECKING: from mistralai.models import ( @@ -201,9 +203,11 @@ def token_counter(self) -> BaseTokenCounter: return self._token_counter @api_keys_required("MISTRAL_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> ChatCompletion: r"""Runs inference of Mistral chat completion. diff --git a/camel/models/model_manager.py b/camel/models/model_manager.py index 3e324d0ed0..83f39c0535 100644 --- a/camel/models/model_manager.py +++ b/camel/models/model_manager.py @@ -20,10 +20,13 @@ Callable, Dict, List, + Optional, + Type, Union, ) from openai import Stream +from pydantic import BaseModel from camel.messages import OpenAIMessage from camel.models.base_model import BaseModelBackend @@ -178,7 +181,10 @@ def random_model(self) -> BaseModelBackend: return choice(self.models) def run( - self, messages: List[OpenAIMessage] + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Process a list of messages by selecting a model based on the scheduling strategy. 
@@ -198,7 +204,7 @@ def run( # Pass all messages to the selected model and get the response try: - response = self.current_model.run(messages) + response = self.current_model.run(messages, response_format, tools) except Exception as exc: logger.error(f"Error processing with model: {self.current_model}") if self.scheduling_strategy == self.always_first: diff --git a/camel/models/nemotron_model.py b/camel/models/nemotron_model.py index dbc6d504b8..a5559434f4 100644 --- a/camel/models/nemotron_model.py +++ b/camel/models/nemotron_model.py @@ -12,9 +12,10 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os -from typing import List, Optional, Union +from typing import List, Optional, Type, Union from openai import OpenAI +from pydantic import BaseModel from camel.messages import OpenAIMessage from camel.models import BaseModelBackend @@ -59,9 +60,11 @@ def __init__( ) @api_keys_required("NVIDIA_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> ChatCompletion: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/nvidia_model.py b/camel/models/nvidia_model.py index 47f071b508..900359fb3b 100644 --- a/camel/models/nvidia_model.py +++ b/camel/models/nvidia_model.py @@ -13,13 +13,14 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream from openai.types.chat import ( ChatCompletion, ChatCompletionChunk, ) +from pydantic import BaseModel from camel.configs import NVIDIA_API_PARAMS, NvidiaConfig from camel.messages import OpenAIMessage @@ -73,9 +74,11 @@ def __init__( ) @api_keys_required("NVIDIA_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of NVIDIA chat completion. diff --git a/camel/models/ollama_model.py b/camel/models/ollama_model.py index c0e2be59d5..8a2044e7ac 100644 --- a/camel/models/ollama_model.py +++ b/camel/models/ollama_model.py @@ -13,9 +13,10 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os import subprocess -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import OLLAMA_API_PARAMS, OllamaConfig from camel.messages import OpenAIMessage @@ -119,9 +120,11 @@ def check_model_config(self): "input into Ollama model backend." ) - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/openai_compatible_model.py b/camel/models/openai_compatible_model.py index 204c475ebc..4ecf6bcde2 100644 --- a/camel/models/openai_compatible_model.py +++ b/camel/models/openai_compatible_model.py @@ -13,9 +13,10 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= import os -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.messages import OpenAIMessage from camel.models import BaseModelBackend @@ -67,9 +68,11 @@ def __init__( base_url=self._url, ) - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/openai_model.py b/camel/models/openai_model.py index e991bae4ad..8fd453e25b 100644 --- a/camel/models/openai_model.py +++ b/camel/models/openai_model.py @@ -13,9 +13,10 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os import warnings -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig from camel.messages import OpenAIMessage @@ -87,9 +88,11 @@ def token_counter(self) -> BaseTokenCounter: return self._token_counter @api_keys_required("OPENAI_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/qwen_model.py b/camel/models/qwen_model.py index 9135fa270a..a6ed7bf9bd 100644 --- a/camel/models/qwen_model.py +++ b/camel/models/qwen_model.py @@ -13,9 +13,10 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import QWEN_API_PARAMS, QwenConfig from camel.messages import OpenAIMessage @@ -78,9 +79,11 @@ def __init__( ) @api_keys_required("QWEN_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Qwen chat completion. diff --git a/camel/models/reka_model.py b/camel/models/reka_model.py index e182fd05ba..10a3db3381 100644 --- a/camel/models/reka_model.py +++ b/camel/models/reka_model.py @@ -11,7 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union + +from pydantic import BaseModel from camel.configs import REKA_API_PARAMS, RekaConfig from camel.messages import OpenAIMessage @@ -112,6 +114,8 @@ def _convert_reka_to_openai_response( def _convert_openai_to_reka_messages( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> List["ChatMessage"]: r"""Converts OpenAI API messages to Reka API messages. 
@@ -169,9 +173,11 @@ def token_counter(self) -> BaseTokenCounter: return self._token_counter @api_keys_required("REKA_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> ChatCompletion: r"""Runs inference of Mistral chat completion. diff --git a/camel/models/samba_model.py b/camel/models/samba_model.py index 1f4db42972..b74f31bfba 100644 --- a/camel/models/samba_model.py +++ b/camel/models/samba_model.py @@ -15,10 +15,11 @@ import os import time import uuid -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union import httpx from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import ( SAMBA_CLOUD_API_PARAMS, @@ -144,8 +145,11 @@ def check_model_config(self): ) @api_keys_required("SAMBA_API_KEY") - def run( # type: ignore[misc] - self, messages: List[OpenAIMessage] + def _run( # type: ignore[misc] + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs SambaNova's service. diff --git a/camel/models/sglang_model.py b/camel/models/sglang_model.py index 5369f3db5f..43f89a45e6 100644 --- a/camel/models/sglang_model.py +++ b/camel/models/sglang_model.py @@ -14,9 +14,10 @@ import logging import threading import time -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import SGLANG_API_PARAMS, SGLangConfig from camel.messages import OpenAIMessage @@ -178,9 +179,11 @@ def check_model_config(self): "input into SGLang model backend." ) - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/stub_model.py b/camel/models/stub_model.py index e85e1298fb..31d1f80ac0 100644 --- a/camel/models/stub_model.py +++ b/camel/models/stub_model.py @@ -12,9 +12,10 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import time -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import Stream +from pydantic import BaseModel from camel.messages import OpenAIMessage from camel.models import BaseModelBackend @@ -74,8 +75,11 @@ def token_counter(self) -> BaseTokenCounter: self._token_counter = StubTokenCounter() return self._token_counter - def run( - self, messages: List[OpenAIMessage] + def _run( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Run fake inference by returning a fixed string. All arguments are unused for the dummy model. diff --git a/camel/models/togetherai_model.py b/camel/models/togetherai_model.py index 2fe5efd3c9..797d32d91d 100644 --- a/camel/models/togetherai_model.py +++ b/camel/models/togetherai_model.py @@ -13,9 +13,10 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= import os -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import TOGETHERAI_API_PARAMS, TogetherAIConfig from camel.messages import OpenAIMessage @@ -79,9 +80,11 @@ def __init__( ) @api_keys_required("TOGETHER_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/vllm_model.py b/camel/models/vllm_model.py index 7493f5309f..676aede8cd 100644 --- a/camel/models/vllm_model.py +++ b/camel/models/vllm_model.py @@ -13,9 +13,10 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os import subprocess -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import VLLM_API_PARAMS, VLLMConfig from camel.messages import OpenAIMessage @@ -121,9 +122,11 @@ def check_model_config(self): "input into vLLM model backend." ) - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/yi_model.py b/camel/models/yi_model.py index 5b195bb5cb..7843b3bab5 100644 --- a/camel/models/yi_model.py +++ b/camel/models/yi_model.py @@ -13,9 +13,10 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import YI_API_PARAMS, YiConfig from camel.messages import OpenAIMessage @@ -77,9 +78,11 @@ def __init__( ) @api_keys_required("YI_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Yi chat completion. diff --git a/camel/models/zhipuai_model.py b/camel/models/zhipuai_model.py index b360f67be0..86a8ccf629 100644 --- a/camel/models/zhipuai_model.py +++ b/camel/models/zhipuai_model.py @@ -13,9 +13,10 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI, Stream +from pydantic import BaseModel from camel.configs import ZHIPUAI_API_PARAMS, ZhipuAIConfig from camel.messages import OpenAIMessage @@ -77,9 +78,11 @@ def __init__( ) @api_keys_required("ZHIPUAI_API_KEY") - def run( + def _run( self, messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[str]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. 
diff --git a/examples/models/nemotron_model_example.py b/examples/models/nemotron_model_example.py index 0b465d0568..d9067ad5dc 100644 --- a/examples/models/nemotron_model_example.py +++ b/examples/models/nemotron_model_example.py @@ -26,7 +26,7 @@ }, ] -ans = nemotron.run(message) +ans = nemotron._run(message) print(ans) ''' =============================================================================== diff --git a/test/agents/test_chat_agent.py b/test/agents/test_chat_agent.py index 3586b7ba19..cad8736f9d 100644 --- a/test/agents/test_chat_agent.py +++ b/test/agents/test_chat_agent.py @@ -335,7 +335,7 @@ def test_chat_agent_step_with_external_tools(step_call_count=3): ), ) - model.run = MagicMock( + model._run = MagicMock( side_effect=[model_backend_external1, model_backend_external2] * step_call_count ) @@ -476,7 +476,7 @@ def test_chat_agent_multiple_return_messages(n, step_call_count=3): total_tokens=47, ), ) - model.run = MagicMock(return_value=model_backend_rsp_tool) + model._run = MagicMock(return_value=model_backend_rsp_tool) system_msg = BaseMessage( "Assistant", @@ -542,7 +542,7 @@ def test_chat_agent_multiple_return_message_error(n, step_call_count=3): ), ) ) - model.run = MagicMock(return_value=model_backend_multi_messages) + model._run = MagicMock(return_value=model_backend_multi_messages) system_msg = BaseMessage( "Assistant", @@ -594,7 +594,7 @@ def test_chat_agent_stream_output(step_call_count=3): model_type=ModelType.GPT_4O_MINI, model_config_dict=stream_model_config.as_dict(), ) - model.run = MagicMock(return_value=model_backend_rsp_base) + model._run = MagicMock(return_value=model_backend_rsp_base) stream_assistant = ChatAgent(system_msg, model=model) stream_assistant.reset() for i in range(step_call_count): @@ -845,7 +845,7 @@ def test_tool_calling_sync(step_call_count=3): ), ) - model.run = MagicMock( + model._run = MagicMock( side_effect=[ model_backend_rsp_tool, model_backend_rsp_tool1, @@ -971,7 +971,7 @@ async def test_tool_calling_math_async(step_call_count=3): ), ) - model.run = MagicMock( + model._run = MagicMock( side_effect=[ model_backend_rsp_tool, model_backend_rsp_tool1, @@ -1026,7 +1026,7 @@ async def async_sleep(second: int) -> int: # Mock tool calling def mock_run_tool_calling_async(*args, **kwargs): # Reset tool_calls at the beginning of each new round of step() call - if model.run.call_count % 2 == 1: + if model._run.call_count % 2 == 1: model_backend_rsp_tool_async.choices[0].message.tool_calls = [ ChatCompletionMessageToolCall( id='call_mock_123456', @@ -1049,7 +1049,7 @@ def mock_run_tool_calling_async(*args, **kwargs): return model_backend_rsp_tool_async - model.run = MagicMock(side_effect=mock_run_tool_calling_async) + model._run = MagicMock(side_effect=mock_run_tool_calling_async) agent = ChatAgent( system_message=system_message, diff --git a/test/agents/test_role_playing.py b/test/agents/test_role_playing.py index 5a12808b91..a5c66aeb8f 100644 --- a/test/agents/test_role_playing.py +++ b/test/agents/test_role_playing.py @@ -148,7 +148,7 @@ def test_role_playing_step( step_call_count=3, ): if model is not None: - model.run = MagicMock(return_value=model_backend_rsp) + model._run = MagicMock(return_value=model_backend_rsp) role_playing = RolePlaying( assistant_role_name="AI Assistant", @@ -196,7 +196,7 @@ def test_role_playing_step( @pytest.mark.model_backend def test_role_playing_with_function(step_call_count=3): if model is not None: - model.run = MagicMock(return_value=model_backend_rsp) + model._run = 
MagicMock(return_value=model_backend_rsp) tools = MathToolkit().get_tools() diff --git a/test/models/test_model_factory.py b/test/models/test_model_factory.py index 88042faa21..bc0d77346b 100644 --- a/test/models/test_model_factory.py +++ b/test/models/test_model_factory.py @@ -138,7 +138,7 @@ def test_model_factory(model_platform, model_type): "content": "Hello", }, ] - response = model_inst.run(messages).model_dump() + response = model_inst._run(messages).model_dump() assert isinstance(response, dict) assert 'id' in response assert isinstance(response['id'], str) diff --git a/test/models/test_model_manager.py b/test/models/test_model_manager.py index 8ed79678b8..53027fbfe2 100644 --- a/test/models/test_model_manager.py +++ b/test/models/test_model_manager.py @@ -68,8 +68,8 @@ def test_model_manager( assert model_manager.scheduling_strategy.__name__ == "round_robin" for model in model_manager.models: if TYPE_CHECKING: - assert isinstance(model.run, Mock) - assert model.run.call_count == times_each_model_called + assert isinstance(model._run, Mock) + assert model._run.call_count == times_each_model_called if strategy == "always_first": assert model_manager.scheduling_strategy.__name__ == "always_first" assert models[0].run.call_count == times_each_model_called @@ -79,8 +79,8 @@ def test_model_manager( total_calls = 0 for model in model_manager.models: if TYPE_CHECKING: - assert isinstance(model.run, Mock) - total_calls += model.run.call_count + assert isinstance(model._run, Mock) + total_calls += model._run.call_count assert total_calls == times_each_model_called From a5333398a37f9b5387bff6f73f65afadf92bcaa5 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Fri, 27 Dec 2024 22:58:11 -0600 Subject: [PATCH 02/28] openai refactored --- camel/configs/openai_config.py | 22 +- camel/models/anthropic_model.py | 2 +- camel/models/azure_openai_model.py | 2 +- camel/models/base_model.py | 6 +- camel/models/cohere_model.py | 2 +- camel/models/deepseek_model.py | 2 +- camel/models/gemini_model.py | 2 +- camel/models/groq_model.py | 2 +- camel/models/litellm_model.py | 2 +- camel/models/mistral_model.py | 2 +- camel/models/model_manager.py | 2 +- camel/models/nemotron_model.py | 4 +- camel/models/nvidia_model.py | 2 +- camel/models/ollama_model.py | 2 +- camel/models/openai_compatible_model.py | 2 +- camel/models/openai_model.py | 96 ++-- camel/models/qwen_model.py | 2 +- camel/models/reka_model.py | 2 +- camel/models/samba_model.py | 2 +- camel/models/sglang_model.py | 2 +- camel/models/stub_model.py | 2 +- camel/models/togetherai_model.py | 2 +- camel/models/vllm_model.py | 2 +- camel/models/yi_model.py | 2 +- camel/models/zhipuai_model.py | 2 +- camel/types/__init__.py | 4 +- camel/types/openai_types.py | 28 +- poetry.lock | 641 +++++++++++------------- 28 files changed, 395 insertions(+), 448 deletions(-) diff --git a/camel/configs/openai_config.py b/camel/configs/openai_config.py index 71b66ac972..9397e98e17 100644 --- a/camel/configs/openai_config.py +++ b/camel/configs/openai_config.py @@ -13,12 +13,12 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= from __future__ import annotations -from typing import Any, Optional, Sequence, Type, Union +from typing import Any, Dict, Optional, Sequence, Type, Union from pydantic import BaseModel, Field from camel.configs.base_config import BaseConfig -from camel.types import NOT_GIVEN, NotGiven +from camel.toolkits import FunctionTool class ChatGPTConfig(BaseConfig): @@ -101,29 +101,27 @@ class ChatGPTConfig(BaseConfig): top_p: float = 1.0 n: int = 1 stream: bool = False - stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN - max_tokens: Union[int, NotGiven] = NOT_GIVEN + stop: Optional[Union[str, Sequence[str]]] = None + max_tokens: Optional[int] = None presence_penalty: float = 0.0 - response_format: Union[Type[BaseModel], dict, NotGiven] = NOT_GIVEN + response_format: Optional[Union[Type[BaseModel], Dict]] = None frequency_penalty: float = 0.0 - logit_bias: dict = Field(default_factory=dict) + logit_bias: Dict = Field(default_factory=dict) user: str = "" - tool_choice: Optional[Union[dict[str, str], str]] = None + tool_choice: Optional[Union[Dict[str, str], str]] = None - def as_dict(self) -> dict[str, Any]: + def as_dict(self) -> Dict[str, Any]: r"""Convert the current configuration to a dictionary. This method converts the current configuration object to a dictionary representation, which can be used for serialization or other purposes. Returns: - dict[str, Any]: A dictionary representation of the current + Dict[str, Any]: A dictionary representation of the current configuration. """ config_dict = self.model_dump() if self.tools: - from camel.toolkits import FunctionTool - tools_schema = [] for tool in self.tools: if not isinstance(tool, FunctionTool): @@ -132,7 +130,7 @@ def as_dict(self) -> dict[str, Any]: "be an instance of `FunctionTool`." ) tools_schema.append(tool.get_openai_tool_schema()) - config_dict["tools"] = NOT_GIVEN + config_dict["tools"] = tools_schema return config_dict diff --git a/camel/models/anthropic_model.py b/camel/models/anthropic_model.py index 12bdd61435..cfa8fbc5e3 100644 --- a/camel/models/anthropic_model.py +++ b/camel/models/anthropic_model.py @@ -125,7 +125,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ): r"""Run inference of Anthropic chat completion. diff --git a/camel/models/azure_openai_model.py b/camel/models/azure_openai_model.py index 9f6a88b9a9..0771c1b694 100644 --- a/camel/models/azure_openai_model.py +++ b/camel/models/azure_openai_model.py @@ -113,7 +113,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Azure OpenAI chat completion. 
diff --git a/camel/models/base_model.py b/camel/models/base_model.py index 5f7d5e10dc..909b5deab1 100644 --- a/camel/models/base_model.py +++ b/camel/models/base_model.py @@ -78,8 +78,8 @@ def token_counter(self) -> BaseTokenCounter: def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: pass @@ -87,7 +87,7 @@ def run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs the query to the backend model. diff --git a/camel/models/cohere_model.py b/camel/models/cohere_model.py index f3d7cc2794..36d2d602a6 100644 --- a/camel/models/cohere_model.py +++ b/camel/models/cohere_model.py @@ -217,7 +217,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> ChatCompletion: r"""Runs inference of Cohere chat completion. diff --git a/camel/models/deepseek_model.py b/camel/models/deepseek_model.py index dac353be7f..f00250b5d5 100644 --- a/camel/models/deepseek_model.py +++ b/camel/models/deepseek_model.py @@ -96,7 +96,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of DeepSeek chat completion. diff --git a/camel/models/gemini_model.py b/camel/models/gemini_model.py index 89530dac12..ebd5e7cb8a 100644 --- a/camel/models/gemini_model.py +++ b/camel/models/gemini_model.py @@ -83,7 +83,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Gemini chat completion. diff --git a/camel/models/groq_model.py b/camel/models/groq_model.py index 90cf742895..e56ef0c4a3 100644 --- a/camel/models/groq_model.py +++ b/camel/models/groq_model.py @@ -95,7 +95,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/litellm_model.py b/camel/models/litellm_model.py index a82c388700..e3550cd80c 100644 --- a/camel/models/litellm_model.py +++ b/camel/models/litellm_model.py @@ -112,7 +112,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> ChatCompletion: r"""Runs inference of LiteLLM chat completion. 
diff --git a/camel/models/mistral_model.py b/camel/models/mistral_model.py index 3cde5f5b9a..1c02910c14 100644 --- a/camel/models/mistral_model.py +++ b/camel/models/mistral_model.py @@ -207,7 +207,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> ChatCompletion: r"""Runs inference of Mistral chat completion. diff --git a/camel/models/model_manager.py b/camel/models/model_manager.py index 83f39c0535..f30babf2ba 100644 --- a/camel/models/model_manager.py +++ b/camel/models/model_manager.py @@ -184,7 +184,7 @@ def run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Process a list of messages by selecting a model based on the scheduling strategy. diff --git a/camel/models/nemotron_model.py b/camel/models/nemotron_model.py index a5559434f4..96194d5d06 100644 --- a/camel/models/nemotron_model.py +++ b/camel/models/nemotron_model.py @@ -12,7 +12,7 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os -from typing import List, Optional, Type, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import OpenAI from pydantic import BaseModel @@ -64,7 +64,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> ChatCompletion: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/nvidia_model.py b/camel/models/nvidia_model.py index 900359fb3b..44bdd5a1ef 100644 --- a/camel/models/nvidia_model.py +++ b/camel/models/nvidia_model.py @@ -78,7 +78,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of NVIDIA chat completion. diff --git a/camel/models/ollama_model.py b/camel/models/ollama_model.py index 8a2044e7ac..e8de2d95d2 100644 --- a/camel/models/ollama_model.py +++ b/camel/models/ollama_model.py @@ -124,7 +124,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/openai_compatible_model.py b/camel/models/openai_compatible_model.py index 4ecf6bcde2..4e2da8855b 100644 --- a/camel/models/openai_compatible_model.py +++ b/camel/models/openai_compatible_model.py @@ -72,7 +72,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. 
diff --git a/camel/models/openai_model.py b/camel/models/openai_model.py
index 8fd453e25b..300a5b5921 100644
--- a/camel/models/openai_model.py
+++ b/camel/models/openai_model.py
@@ -22,7 +22,6 @@
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
-    NOT_GIVEN,
     ChatCompletion,
     ChatCompletionChunk,
     ModelType,
@@ -33,6 +32,16 @@
     api_keys_required,
 )
 
+O1_UNSUPPORTED_PARAMS = [
+    "temperature",
+    "top_p",
+    "presence_penalty",
+    "frequency_penalty",
+    "logprobs",
+    "top_logprobs",
+    "logit_bias",
+]
+
 
 class OpenAIModel(BaseModelBackend):
     r"""OpenAI API in a unified BaseModelBackend interface.
@@ -65,9 +74,13 @@ def __init__(
             model_config_dict = ChatGPTConfig().as_dict()
         api_key = api_key or os.environ.get("OPENAI_API_KEY")
         url = url or os.environ.get("OPENAI_API_BASE_URL")
+
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
+
+        self._sanitize_model_config()
+
         self._client = OpenAI(
             timeout=180,
             max_retries=3,
@@ -75,6 +88,25 @@
             api_key=self._api_key,
         )
 
+    def _sanitize_model_config(self) -> None:
+        """Sanitize the model configuration for O1 models."""
+        if self.model_type in [
+            ModelType.O1,
+            ModelType.O1_MINI,
+            ModelType.O1_PREVIEW,
+        ]:
+            warnings.warn(
+                "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), "
+                "which has certain limitations, reference: "
+                "`https://platform.openai.com/docs/guides/reasoning`.",
+                UserWarning,
+            )
+            self.model_config_dict = {
+                k: v
+                for k, v in self.model_config_dict.items()
+                if k not in O1_UNSUPPORTED_PARAMS
+            }
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
@@ -91,74 +123,46 @@ def token_counter(self) -> BaseTokenCounter:
     def _run(
         self,
         messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[str]] = None,
+        response_format: Optional[Type[BaseModel]],
+        tools: Optional[List[Dict[str, Any]]],
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of OpenAI chat completion.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools
+                to use for the request.
 
         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
""" - # o1-preview and o1-mini have Beta limitations - # reference: https://platform.openai.com/docs/guides/reasoning - if self.model_type in [ - ModelType.O1, - ModelType.O1_MINI, - ModelType.O1_PREVIEW, - ]: - warnings.warn( - "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), " - "which has certain limitations, reference: " - "`https://platform.openai.com/docs/guides/reasoning`.", - UserWarning, - ) + request_config = self.model_config_dict.copy() - # Check and remove unsupported parameters and reset the fixed - # parameters - unsupported_keys = [ - "temperature", - "top_p", - "presence_penalty", - "frequency_penalty", - "logprobs", - "top_logprobs", - "logit_bias", - ] - for key in unsupported_keys: - if key in self.model_config_dict: - del self.model_config_dict[key] - - if self.model_config_dict.get("response_format"): - # stream is not supported in beta.chat.completions.parse - if "stream" in self.model_config_dict: - del self.model_config_dict["stream"] + if tools: + for tool in tools: + function_dict = tool.get('function', {}) + function_dict.pop("strict", None) + request_config["tools"] = tools + if response_format: + request_config["response_format"] = response_format + request_config.pop("stream", None) response = self._client.beta.chat.completions.parse( messages=messages, model=self.model_type, - **self.model_config_dict, + **request_config, ) - return self._to_chat_completion(response) - # Removing 'strict': True from the dictionary for - # client.chat.completions.create - if self.model_config_dict.get('tools') is not NOT_GIVEN: - for tool in self.model_config_dict.get('tools', []): - function_dict = tool.get('function', {}) - if 'strict' in function_dict: - del function_dict['strict'] - response = self._client.chat.completions.create( messages=messages, model=self.model_type, - **self.model_config_dict, + **request_config, ) return response diff --git a/camel/models/qwen_model.py b/camel/models/qwen_model.py index a6ed7bf9bd..56ac2c768d 100644 --- a/camel/models/qwen_model.py +++ b/camel/models/qwen_model.py @@ -83,7 +83,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Qwen chat completion. diff --git a/camel/models/reka_model.py b/camel/models/reka_model.py index 10a3db3381..d27a8f1856 100644 --- a/camel/models/reka_model.py +++ b/camel/models/reka_model.py @@ -177,7 +177,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> ChatCompletion: r"""Runs inference of Mistral chat completion. diff --git a/camel/models/samba_model.py b/camel/models/samba_model.py index b74f31bfba..57dc5cd3d9 100644 --- a/camel/models/samba_model.py +++ b/camel/models/samba_model.py @@ -149,7 +149,7 @@ def _run( # type: ignore[misc] self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs SambaNova's service. 
diff --git a/camel/models/sglang_model.py b/camel/models/sglang_model.py index 43f89a45e6..d7a2838485 100644 --- a/camel/models/sglang_model.py +++ b/camel/models/sglang_model.py @@ -183,7 +183,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/stub_model.py b/camel/models/stub_model.py index 31d1f80ac0..d4f3180ba2 100644 --- a/camel/models/stub_model.py +++ b/camel/models/stub_model.py @@ -79,7 +79,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Run fake inference by returning a fixed string. All arguments are unused for the dummy model. diff --git a/camel/models/togetherai_model.py b/camel/models/togetherai_model.py index 797d32d91d..4c17f2c742 100644 --- a/camel/models/togetherai_model.py +++ b/camel/models/togetherai_model.py @@ -84,7 +84,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/vllm_model.py b/camel/models/vllm_model.py index 676aede8cd..84d5d2589b 100644 --- a/camel/models/vllm_model.py +++ b/camel/models/vllm_model.py @@ -126,7 +126,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. diff --git a/camel/models/yi_model.py b/camel/models/yi_model.py index 7843b3bab5..90863e3750 100644 --- a/camel/models/yi_model.py +++ b/camel/models/yi_model.py @@ -82,7 +82,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Yi chat completion. diff --git a/camel/models/zhipuai_model.py b/camel/models/zhipuai_model.py index 86a8ccf629..598012e212 100644 --- a/camel/models/zhipuai_model.py +++ b/camel/models/zhipuai_model.py @@ -82,7 +82,7 @@ def _run( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[str]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. 
diff --git a/camel/types/__init__.py b/camel/types/__init__.py index 3904dc2615..dddcc43cc3 100644 --- a/camel/types/__init__.py +++ b/camel/types/__init__.py @@ -71,8 +71,8 @@ 'AudioModelType', 'VoiceType', 'UnifiedModelType', - 'NOT_GIVEN', - 'NotGiven', 'ParsedChatCompletion', 'HuggingFaceRepoType', + 'NOT_GIVEN', + 'NotGiven', ] diff --git a/camel/types/openai_types.py b/camel/types/openai_types.py index e0c56b8d88..3a16375a5c 100644 --- a/camel/types/openai_types.py +++ b/camel/types/openai_types.py @@ -34,16 +34,18 @@ from openai.types.chat import ParsedChatCompletion from openai._types import NOT_GIVEN, NotGiven -Choice = Choice -ChatCompletion = ChatCompletion -ChatCompletionChunk = ChatCompletionChunk -ChatCompletionMessage = ChatCompletionMessage -ChatCompletionMessageParam = ChatCompletionMessageParam -ChatCompletionSystemMessageParam = ChatCompletionSystemMessageParam -ChatCompletionUserMessageParam = ChatCompletionUserMessageParam -ChatCompletionAssistantMessageParam = ChatCompletionAssistantMessageParam -ChatCompletionFunctionMessageParam = ChatCompletionFunctionMessageParam -CompletionUsage = CompletionUsage -NOT_GIVEN = NOT_GIVEN -NotGiven = NotGiven -ParsedChatCompletion = ParsedChatCompletion +__all__ = [ + "Choice", + "ChatCompletion", + "ChatCompletionChunk", + "ChatCompletionMessage", + "ChatCompletionMessageParam", + "ChatCompletionSystemMessageParam", + "ChatCompletionUserMessageParam", + "ChatCompletionAssistantMessageParam", + "ChatCompletionFunctionMessageParam", + "CompletionUsage", + "ParsedChatCompletion", + "NOT_GIVEN", + "NotGiven", +] diff --git a/poetry.lock b/poetry.lock index 00d3d41906..8d2b98a884 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "accelerate" @@ -675,13 +675,13 @@ css = ["tinycss2 (>=1.1.0,<1.5)"] [[package]] name = "botocore" -version = "1.35.86" +version = "1.35.88" description = "Low-level, data-driven core of boto 3." optional = true python-versions = ">=3.8" files = [ - {file = "botocore-1.35.86-py3-none-any.whl", hash = "sha256:77cb4b445e4f424f956c68c688bd3ad527f4d214d51d67ffc8e245f4476d7de0"}, - {file = "botocore-1.35.86.tar.gz", hash = "sha256:951e944eb30284b4593d4da98f70f7b5292ea237e4de0c5a2852946a549b8347"}, + {file = "botocore-1.35.88-py3-none-any.whl", hash = "sha256:e60cc3fbe8d7a10f70e7e852d76be2b29f23ead418a5899d366ea32b1eacb5a5"}, + {file = "botocore-1.35.88.tar.gz", hash = "sha256:58dcd9a464c354b8c6c25261d8de830d175d9739eae568bf0c52e57116fb03c6"}, ] [package.dependencies] @@ -817,116 +817,103 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.4.0" +version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.7" files = [ - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, - {file = 
"charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, - {file = 
"charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, - {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, - {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, + {file = 
"charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, + {file 
= "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, + {file = 
"charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, + {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, + {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, ] [[package]] @@ -1097,73 +1084,73 @@ test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist" [[package]] name = "coverage" -version = "7.6.9" +version = "7.6.10" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" files = [ - {file = "coverage-7.6.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:85d9636f72e8991a1706b2b55b06c27545448baf9f6dbf51c4004609aacd7dcb"}, - {file = "coverage-7.6.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:608a7fd78c67bee8936378299a6cb9f5149bb80238c7a566fc3e6717a4e68710"}, - {file = "coverage-7.6.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96d636c77af18b5cb664ddf12dab9b15a0cfe9c0bde715da38698c8cea748bfa"}, - {file = "coverage-7.6.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d75cded8a3cff93da9edc31446872d2997e327921d8eed86641efafd350e1df1"}, - {file = "coverage-7.6.9-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7b15f589593110ae767ce997775d645b47e5cbbf54fd322f8ebea6277466cec"}, - {file = "coverage-7.6.9-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:44349150f6811b44b25574839b39ae35291f6496eb795b7366fef3bd3cf112d3"}, - {file = "coverage-7.6.9-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d891c136b5b310d0e702e186d70cd16d1119ea8927347045124cb286b29297e5"}, - {file = "coverage-7.6.9-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:db1dab894cc139f67822a92910466531de5ea6034ddfd2b11c0d4c6257168073"}, - {file = "coverage-7.6.9-cp310-cp310-win32.whl", hash = "sha256:41ff7b0da5af71a51b53f501a3bac65fb0ec311ebed1632e58fc6107f03b9198"}, - {file = "coverage-7.6.9-cp310-cp310-win_amd64.whl", hash = "sha256:35371f8438028fdccfaf3570b31d98e8d9eda8bb1d6ab9473f5a390969e98717"}, - {file = "coverage-7.6.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:932fc826442132dde42ee52cf66d941f581c685a6313feebed358411238f60f9"}, - {file = "coverage-7.6.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:085161be5f3b30fd9b3e7b9a8c301f935c8313dcf928a07b116324abea2c1c2c"}, - {file = "coverage-7.6.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ccc660a77e1c2bf24ddbce969af9447a9474790160cfb23de6be4fa88e3951c7"}, - {file = "coverage-7.6.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c69e42c892c018cd3c8d90da61d845f50a8243062b19d228189b0224150018a9"}, 
- {file = "coverage-7.6.9-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0824a28ec542a0be22f60c6ac36d679e0e262e5353203bea81d44ee81fe9c6d4"}, - {file = "coverage-7.6.9-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4401ae5fc52ad8d26d2a5d8a7428b0f0c72431683f8e63e42e70606374c311a1"}, - {file = "coverage-7.6.9-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:98caba4476a6c8d59ec1eb00c7dd862ba9beca34085642d46ed503cc2d440d4b"}, - {file = "coverage-7.6.9-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ee5defd1733fd6ec08b168bd4f5387d5b322f45ca9e0e6c817ea6c4cd36313e3"}, - {file = "coverage-7.6.9-cp311-cp311-win32.whl", hash = "sha256:f2d1ec60d6d256bdf298cb86b78dd715980828f50c46701abc3b0a2b3f8a0dc0"}, - {file = "coverage-7.6.9-cp311-cp311-win_amd64.whl", hash = "sha256:0d59fd927b1f04de57a2ba0137166d31c1a6dd9e764ad4af552912d70428c92b"}, - {file = "coverage-7.6.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:99e266ae0b5d15f1ca8d278a668df6f51cc4b854513daab5cae695ed7b721cf8"}, - {file = "coverage-7.6.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9901d36492009a0a9b94b20e52ebfc8453bf49bb2b27bca2c9706f8b4f5a554a"}, - {file = "coverage-7.6.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abd3e72dd5b97e3af4246cdada7738ef0e608168de952b837b8dd7e90341f015"}, - {file = "coverage-7.6.9-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff74026a461eb0660366fb01c650c1d00f833a086b336bdad7ab00cc952072b3"}, - {file = "coverage-7.6.9-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65dad5a248823a4996724a88eb51d4b31587aa7aa428562dbe459c684e5787ae"}, - {file = "coverage-7.6.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:22be16571504c9ccea919fcedb459d5ab20d41172056206eb2994e2ff06118a4"}, - {file = "coverage-7.6.9-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f957943bc718b87144ecaee70762bc2bc3f1a7a53c7b861103546d3a403f0a6"}, - {file = "coverage-7.6.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0ae1387db4aecb1f485fb70a6c0148c6cdaebb6038f1d40089b1fc84a5db556f"}, - {file = "coverage-7.6.9-cp312-cp312-win32.whl", hash = "sha256:1a330812d9cc7ac2182586f6d41b4d0fadf9be9049f350e0efb275c8ee8eb692"}, - {file = "coverage-7.6.9-cp312-cp312-win_amd64.whl", hash = "sha256:b12c6b18269ca471eedd41c1b6a1065b2f7827508edb9a7ed5555e9a56dcfc97"}, - {file = "coverage-7.6.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:899b8cd4781c400454f2f64f7776a5d87bbd7b3e7f7bda0cb18f857bb1334664"}, - {file = "coverage-7.6.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:61f70dc68bd36810972e55bbbe83674ea073dd1dcc121040a08cdf3416c5349c"}, - {file = "coverage-7.6.9-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a289d23d4c46f1a82d5db4abeb40b9b5be91731ee19a379d15790e53031c014"}, - {file = "coverage-7.6.9-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e216d8044a356fc0337c7a2a0536d6de07888d7bcda76febcb8adc50bdbbd00"}, - {file = "coverage-7.6.9-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c026eb44f744acaa2bda7493dad903aa5bf5fc4f2554293a798d5606710055d"}, - {file = "coverage-7.6.9-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e77363e8425325384f9d49272c54045bbed2f478e9dd698dbc65dbc37860eb0a"}, - {file = 
"coverage-7.6.9-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:777abfab476cf83b5177b84d7486497e034eb9eaea0d746ce0c1268c71652077"}, - {file = "coverage-7.6.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:447af20e25fdbe16f26e84eb714ba21d98868705cb138252d28bc400381f6ffb"}, - {file = "coverage-7.6.9-cp313-cp313-win32.whl", hash = "sha256:d872ec5aeb086cbea771c573600d47944eea2dcba8be5f3ee649bfe3cb8dc9ba"}, - {file = "coverage-7.6.9-cp313-cp313-win_amd64.whl", hash = "sha256:fd1213c86e48dfdc5a0cc676551db467495a95a662d2396ecd58e719191446e1"}, - {file = "coverage-7.6.9-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ba9e7484d286cd5a43744e5f47b0b3fb457865baf07bafc6bee91896364e1419"}, - {file = "coverage-7.6.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e5ea1cf0872ee455c03e5674b5bca5e3e68e159379c1af0903e89f5eba9ccc3a"}, - {file = "coverage-7.6.9-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d10e07aa2b91835d6abec555ec8b2733347956991901eea6ffac295f83a30e4"}, - {file = "coverage-7.6.9-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:13a9e2d3ee855db3dd6ea1ba5203316a1b1fd8eaeffc37c5b54987e61e4194ae"}, - {file = "coverage-7.6.9-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c38bf15a40ccf5619fa2fe8f26106c7e8e080d7760aeccb3722664c8656b030"}, - {file = "coverage-7.6.9-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d5275455b3e4627c8e7154feaf7ee0743c2e7af82f6e3b561967b1cca755a0be"}, - {file = "coverage-7.6.9-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8f8770dfc6e2c6a2d4569f411015c8d751c980d17a14b0530da2d7f27ffdd88e"}, - {file = "coverage-7.6.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8d2dfa71665a29b153a9681edb1c8d9c1ea50dfc2375fb4dac99ea7e21a0bcd9"}, - {file = "coverage-7.6.9-cp313-cp313t-win32.whl", hash = "sha256:5e6b86b5847a016d0fbd31ffe1001b63355ed309651851295315031ea7eb5a9b"}, - {file = "coverage-7.6.9-cp313-cp313t-win_amd64.whl", hash = "sha256:97ddc94d46088304772d21b060041c97fc16bdda13c6c7f9d8fcd8d5ae0d8611"}, - {file = "coverage-7.6.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:adb697c0bd35100dc690de83154627fbab1f4f3c0386df266dded865fc50a902"}, - {file = "coverage-7.6.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:be57b6d56e49c2739cdf776839a92330e933dd5e5d929966fbbd380c77f060be"}, - {file = "coverage-7.6.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1592791f8204ae9166de22ba7e6705fa4ebd02936c09436a1bb85aabca3e599"}, - {file = "coverage-7.6.9-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e12ae8cc979cf83d258acb5e1f1cf2f3f83524d1564a49d20b8bec14b637f08"}, - {file = "coverage-7.6.9-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb5555cff66c4d3d6213a296b360f9e1a8e323e74e0426b6c10ed7f4d021e464"}, - {file = "coverage-7.6.9-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b9389a429e0e5142e69d5bf4a435dd688c14478a19bb901735cdf75e57b13845"}, - {file = "coverage-7.6.9-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:592ac539812e9b46046620341498caf09ca21023c41c893e1eb9dbda00a70cbf"}, - {file = "coverage-7.6.9-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a27801adef24cc30871da98a105f77995e13a25a505a0161911f6aafbd66e678"}, - {file = "coverage-7.6.9-cp39-cp39-win32.whl", hash = 
"sha256:8e3c3e38930cfb729cb8137d7f055e5a473ddaf1217966aa6238c88bd9fd50e6"}, - {file = "coverage-7.6.9-cp39-cp39-win_amd64.whl", hash = "sha256:e28bf44afa2b187cc9f41749138a64435bf340adfcacb5b2290c070ce99839d4"}, - {file = "coverage-7.6.9-pp39.pp310-none-any.whl", hash = "sha256:f3ca78518bc6bc92828cd11867b121891d75cae4ea9e908d72030609b996db1b"}, - {file = "coverage-7.6.9.tar.gz", hash = "sha256:4a8d8977b0c6ef5aeadcb644da9e69ae0dcfe66ec7f368c89c72e058bd71164d"}, + {file = "coverage-7.6.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5c912978f7fbf47ef99cec50c4401340436d200d41d714c7a4766f377c5b7b78"}, + {file = "coverage-7.6.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a01ec4af7dfeb96ff0078ad9a48810bb0cc8abcb0115180c6013a6b26237626c"}, + {file = "coverage-7.6.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3b204c11e2b2d883946fe1d97f89403aa1811df28ce0447439178cc7463448a"}, + {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32ee6d8491fcfc82652a37109f69dee9a830e9379166cb73c16d8dc5c2915165"}, + {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675cefc4c06e3b4c876b85bfb7c59c5e2218167bbd4da5075cbe3b5790a28988"}, + {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f4f620668dbc6f5e909a0946a877310fb3d57aea8198bde792aae369ee1c23b5"}, + {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4eea95ef275de7abaef630c9b2c002ffbc01918b726a39f5a4353916ec72d2f3"}, + {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e2f0280519e42b0a17550072861e0bc8a80a0870de260f9796157d3fca2733c5"}, + {file = "coverage-7.6.10-cp310-cp310-win32.whl", hash = "sha256:bc67deb76bc3717f22e765ab3e07ee9c7a5e26b9019ca19a3b063d9f4b874244"}, + {file = "coverage-7.6.10-cp310-cp310-win_amd64.whl", hash = "sha256:0f460286cb94036455e703c66988851d970fdfd8acc2a1122ab7f4f904e4029e"}, + {file = "coverage-7.6.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ea3c8f04b3e4af80e17bab607c386a830ffc2fb88a5484e1df756478cf70d1d3"}, + {file = "coverage-7.6.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:507a20fc863cae1d5720797761b42d2d87a04b3e5aeb682ef3b7332e90598f43"}, + {file = "coverage-7.6.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d37a84878285b903c0fe21ac8794c6dab58150e9359f1aaebbeddd6412d53132"}, + {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a534738b47b0de1995f85f582d983d94031dffb48ab86c95bdf88dc62212142f"}, + {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d7a2bf79378d8fb8afaa994f91bfd8215134f8631d27eba3e0e2c13546ce994"}, + {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6713ba4b4ebc330f3def51df1d5d38fad60b66720948112f114968feb52d3f99"}, + {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ab32947f481f7e8c763fa2c92fd9f44eeb143e7610c4ca9ecd6a36adab4081bd"}, + {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7bbd8c8f1b115b892e34ba66a097b915d3871db7ce0e6b9901f462ff3a975377"}, + {file = "coverage-7.6.10-cp311-cp311-win32.whl", hash = "sha256:299e91b274c5c9cdb64cbdf1b3e4a8fe538a7a86acdd08fae52301b28ba297f8"}, + {file = 
"coverage-7.6.10-cp311-cp311-win_amd64.whl", hash = "sha256:489a01f94aa581dbd961f306e37d75d4ba16104bbfa2b0edb21d29b73be83609"}, + {file = "coverage-7.6.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27c6e64726b307782fa5cbe531e7647aee385a29b2107cd87ba7c0105a5d3853"}, + {file = "coverage-7.6.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c56e097019e72c373bae32d946ecf9858fda841e48d82df7e81c63ac25554078"}, + {file = "coverage-7.6.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7827a5bc7bdb197b9e066cdf650b2887597ad124dd99777332776f7b7c7d0d0"}, + {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204a8238afe787323a8b47d8be4df89772d5c1e4651b9ffa808552bdf20e1d50"}, + {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67926f51821b8e9deb6426ff3164870976fe414d033ad90ea75e7ed0c2e5022"}, + {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e78b270eadb5702938c3dbe9367f878249b5ef9a2fcc5360ac7bff694310d17b"}, + {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:714f942b9c15c3a7a5fe6876ce30af831c2ad4ce902410b7466b662358c852c0"}, + {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:abb02e2f5a3187b2ac4cd46b8ced85a0858230b577ccb2c62c81482ca7d18852"}, + {file = "coverage-7.6.10-cp312-cp312-win32.whl", hash = "sha256:55b201b97286cf61f5e76063f9e2a1d8d2972fc2fcfd2c1272530172fd28c359"}, + {file = "coverage-7.6.10-cp312-cp312-win_amd64.whl", hash = "sha256:e4ae5ac5e0d1e4edfc9b4b57b4cbecd5bc266a6915c500f358817a8496739247"}, + {file = "coverage-7.6.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:05fca8ba6a87aabdd2d30d0b6c838b50510b56cdcfc604d40760dae7153b73d9"}, + {file = "coverage-7.6.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9e80eba8801c386f72e0712a0453431259c45c3249f0009aff537a517b52942b"}, + {file = "coverage-7.6.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a372c89c939d57abe09e08c0578c1d212e7a678135d53aa16eec4430adc5e690"}, + {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec22b5e7fe7a0fa8509181c4aac1db48f3dd4d3a566131b313d1efc102892c18"}, + {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26bcf5c4df41cad1b19c84af71c22cbc9ea9a547fc973f1f2cc9a290002c8b3c"}, + {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e4630c26b6084c9b3cb53b15bd488f30ceb50b73c35c5ad7871b869cb7365fd"}, + {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2396e8116db77789f819d2bc8a7e200232b7a282c66e0ae2d2cd84581a89757e"}, + {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79109c70cc0882e4d2d002fe69a24aa504dec0cc17169b3c7f41a1d341a73694"}, + {file = "coverage-7.6.10-cp313-cp313-win32.whl", hash = "sha256:9e1747bab246d6ff2c4f28b4d186b205adced9f7bd9dc362051cc37c4a0c7bd6"}, + {file = "coverage-7.6.10-cp313-cp313-win_amd64.whl", hash = "sha256:254f1a3b1eef5f7ed23ef265eaa89c65c8c5b6b257327c149db1ca9d4a35f25e"}, + {file = "coverage-7.6.10-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ccf240eb719789cedbb9fd1338055de2761088202a9a0b73032857e53f612fe"}, + {file = "coverage-7.6.10-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:0c807ca74d5a5e64427c8805de15b9ca140bba13572d6d74e262f46f50b13273"}, + {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bcfa46d7709b5a7ffe089075799b902020b62e7ee56ebaed2f4bdac04c508d8"}, + {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e0de1e902669dccbf80b0415fb6b43d27edca2fbd48c74da378923b05316098"}, + {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7b444c42bbc533aaae6b5a2166fd1a797cdb5eb58ee51a92bee1eb94a1e1cb"}, + {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b330368cb99ef72fcd2dc3ed260adf67b31499584dc8a20225e85bfe6f6cfed0"}, + {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9a7cfb50515f87f7ed30bc882f68812fd98bc2852957df69f3003d22a2aa0abf"}, + {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f93531882a5f68c28090f901b1d135de61b56331bba82028489bc51bdd818d2"}, + {file = "coverage-7.6.10-cp313-cp313t-win32.whl", hash = "sha256:89d76815a26197c858f53c7f6a656686ec392b25991f9e409bcef020cd532312"}, + {file = "coverage-7.6.10-cp313-cp313t-win_amd64.whl", hash = "sha256:54a5f0f43950a36312155dae55c505a76cd7f2b12d26abeebbe7a0b36dbc868d"}, + {file = "coverage-7.6.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:656c82b8a0ead8bba147de9a89bda95064874c91a3ed43a00e687f23cc19d53a"}, + {file = "coverage-7.6.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ccc2b70a7ed475c68ceb548bf69cec1e27305c1c2606a5eb7c3afff56a1b3b27"}, + {file = "coverage-7.6.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5e37dc41d57ceba70956fa2fc5b63c26dba863c946ace9705f8eca99daecdc4"}, + {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0aa9692b4fdd83a4647eeb7db46410ea1322b5ed94cd1715ef09d1d5922ba87f"}, + {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa744da1820678b475e4ba3dfd994c321c5b13381d1041fe9c608620e6676e25"}, + {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0b1818063dc9e9d838c09e3a473c1422f517889436dd980f5d721899e66f315"}, + {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:59af35558ba08b758aec4d56182b222976330ef8d2feacbb93964f576a7e7a90"}, + {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7ed2f37cfce1ce101e6dffdfd1c99e729dd2ffc291d02d3e2d0af8b53d13840d"}, + {file = "coverage-7.6.10-cp39-cp39-win32.whl", hash = "sha256:4bcc276261505d82f0ad426870c3b12cb177752834a633e737ec5ee79bbdff18"}, + {file = "coverage-7.6.10-cp39-cp39-win_amd64.whl", hash = "sha256:457574f4599d2b00f7f637a0700a6422243b3565509457b2dbd3f50703e11f59"}, + {file = "coverage-7.6.10-pp39.pp310-none-any.whl", hash = "sha256:fd34e7b3405f0cc7ab03d54a334c17a9e802897580d964bd8c2001f4b9fd488f"}, + {file = "coverage-7.6.10.tar.gz", hash = "sha256:7fb105327c8f8f0682e29843e2ff96af9dcbe5bab8eeb4b398c6a33a16d80a23"}, ] [package.dependencies] @@ -1439,13 +1426,13 @@ dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "jinja2 (>=3.0.3,<3.1.0)", " [[package]] name = "diffusers" -version = "0.31.0" +version = "0.32.1" description = "State-of-the-art diffusion in PyTorch and JAX." 
optional = true python-versions = ">=3.8.0" files = [ - {file = "diffusers-0.31.0-py3-none-any.whl", hash = "sha256:cbc498ae63f4abfc7c3a07649cdcbee229ef2f9a9a1f0d19c9bbaf22f8d30c1f"}, - {file = "diffusers-0.31.0.tar.gz", hash = "sha256:b1d01a73e45d43a0630c299173915dddd69fc50f2ae8f2ab5de4fd245eaed72f"}, + {file = "diffusers-0.32.1-py3-none-any.whl", hash = "sha256:ab2ed6efe9dd2364db55b94fae453ff149dc06693ec99702cadfbeeaaa00c9c1"}, + {file = "diffusers-0.32.1.tar.gz", hash = "sha256:d48c19a4bce612d225f857e2e5d4f6ee003b88234af46b44c20119ee23f1568c"}, ] [package.dependencies] @@ -1459,12 +1446,12 @@ requests = "*" safetensors = ">=0.3.1" [package.extras] -dev = ["GitPython (<3.1.19)", "Jinja2", "accelerate (>=0.31.0)", "compel (==0.1.8)", "datasets", "flax (>=0.4.1)", "hf-doc-builder (>=0.3.0)", "invisible-watermark (>=0.2.0)", "isort (>=5.5.4)", "jax (>=0.4.1)", "jaxlib (>=0.4.1)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "ruff (==0.1.5)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "torch (>=1.4,<2.5.0)", "torchvision", "transformers (>=4.41.2)", "urllib3 (<=2.0.0)"] +dev = ["GitPython (<3.1.19)", "Jinja2", "accelerate (>=0.31.0)", "compel (==0.1.8)", "datasets", "flax (>=0.4.1)", "hf-doc-builder (>=0.3.0)", "invisible-watermark (>=0.2.0)", "isort (>=5.5.4)", "jax (>=0.4.1)", "jaxlib (>=0.4.1)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "ruff (==0.1.5)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "torch (>=1.4)", "torchvision", "transformers (>=4.41.2)", "urllib3 (<=2.0.0)"] docs = ["hf-doc-builder (>=0.3.0)"] flax = ["flax (>=0.4.1)", "jax (>=0.4.1)", "jaxlib (>=0.4.1)"] quality = ["hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<=2.0.0)"] test = ["GitPython (<3.1.19)", "Jinja2", "compel (==0.1.8)", "datasets", "invisible-watermark (>=0.2.0)", "k-diffusion (>=0.0.12)", "librosa", "parameterized", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock (==1.10.0)", "safetensors (>=0.3.1)", "scipy", "sentencepiece (>=0.1.91,!=0.1.92)", "torchvision", "transformers (>=4.41.2)"] -torch = ["accelerate (>=0.31.0)", "torch (>=1.4,<2.5.0)"] +torch = ["accelerate (>=0.31.0)", "torch (>=1.4)"] training = ["Jinja2", "accelerate (>=0.31.0)", "datasets", "peft (>=0.6.0)", "protobuf (>=3.20.3,<4)", "tensorboard"] [[package]] @@ -1939,13 +1926,13 @@ pydantic = ">=2.9.1" [[package]] name = "flatbuffers" -version = "24.3.25" +version = "24.12.23" description = "The FlatBuffers serialization format for Python" optional = true python-versions = "*" files = [ - {file = "flatbuffers-24.3.25-py2.py3-none-any.whl", hash = "sha256:8dbdec58f935f3765e4f7f3cf635ac3a77f83568138d6a2311f524ec96364812"}, - {file = "flatbuffers-24.3.25.tar.gz", hash = "sha256:de2ec5b203f21441716617f38443e0a8ebf3d25bf0d9c0bb0ce68fa00ad546a4"}, + {file = "flatbuffers-24.12.23-py2.py3-none-any.whl", hash = "sha256:c418e0d48890f4142b92fd3e343e73a48f194e1f80075ddcc5793779b3585444"}, + {file = "flatbuffers-24.12.23.tar.gz", hash = "sha256:2910b0bc6ae9b6db78dd2b18d0b7a0709ba240fb5585f286a3a2b30785c22dac"}, ] [[package]] @@ -3365,125 +3352,91 @@ files = [ [[package]] name = "kiwisolver" -version = "1.4.7" +version = "1.4.8" description = "A fast implementation of the Cassowary 
constraint solver" optional = false -python-versions = ">=3.8" +python-versions = ">=3.10" files = [ - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"}, - {file = 
"kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5d5abf8f8ec1f4e22882273c423e16cae834c36856cac348cfbfa68e01c40f3a"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeb3531b196ef6f11776c21674dba836aeea9d5bd1cf630f869e3d90b16cfade"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7d755065e4e866a8086c9bdada157133ff466476a2ad7861828e17b6026e22c"}, - {file = 
"kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08471d4d86cbaec61f86b217dd938a83d85e03785f51121e791a6e6689a3be95"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7bbfcb7165ce3d54a3dfbe731e470f65739c4c1f85bb1018ee912bae139e263b"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d34eb8494bea691a1a450141ebb5385e4b69d38bb8403b5146ad279f4b30fa3"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9242795d174daa40105c1d86aba618e8eab7bf96ba8c3ee614da8302a9f95503"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0f64a48bb81af7450e641e3fe0b0394d7381e342805479178b3d335d60ca7cf"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8e045731a5416357638d1700927529e2b8ab304811671f665b225f8bf8d8f933"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4322872d5772cae7369f8351da1edf255a604ea7087fe295411397d0cfd9655e"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e1631290ee9271dffe3062d2634c3ecac02c83890ada077d225e081aca8aab89"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:edcfc407e4eb17e037bca59be0e85a2031a2ac87e4fed26d3e9df88b4165f92d"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4d05d81ecb47d11e7f8932bd8b61b720bf0b41199358f3f5e36d38e28f0532c5"}, - {file = "kiwisolver-1.4.7-cp38-cp38-win32.whl", hash = "sha256:b38ac83d5f04b15e515fd86f312479d950d05ce2368d5413d46c088dda7de90a"}, - {file = "kiwisolver-1.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:d83db7cde68459fc803052a55ace60bea2bae361fc3b7a6d5da07e11954e4b09"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bfa1acfa0c54932d5607e19a2c24646fb4c1ae2694437789129cf099789a3b00"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eee3ea935c3d227d49b4eb85660ff631556841f6e567f0f7bda972df6c2c9935"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f3160309af4396e0ed04db259c3ccbfdc3621b5559b5453075e5de555e1f3a1b"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a17f6a29cf8935e587cc8a4dbfc8368c55edc645283db0ce9801016f83526c2d"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10849fb2c1ecbfae45a693c070e0320a91b35dd4bcf58172c023b994283a124d"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ac542bf38a8a4be2dc6b15248d36315ccc65f0743f7b1a76688ffb6b5129a5c2"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674"}, - {file = 
"kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0"}, - {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"}, + {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db"}, + {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b"}, + {file = "kiwisolver-1.4.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce2cf1e5688edcb727fdf7cd1bbd0b6416758996826a8be1d958f91880d0809d"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c8bf637892dc6e6aad2bc6d4d69d08764166e5e3f69d469e55427b6ac001b19d"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:034d2c891f76bd3edbdb3ea11140d8510dca675443da7304205a2eaa45d8334c"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d47b28d1dfe0793d5e96bce90835e17edf9a499b53969b03c6c47ea5985844c3"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb158fe28ca0c29f2260cca8c43005329ad58452c36f0edf298204de32a9a3ed"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5536185fce131780ebd809f8e623bf4030ce1b161353166c49a3c74c287897f"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:369b75d40abedc1da2c1f4de13f3482cb99e3237b38726710f4a793432b1c5ff"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:641f2ddf9358c80faa22e22eb4c9f54bd3f0e442e038728f500e3b978d00aa7d"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d561d2d8883e0819445cfe58d7ddd673e4015c3c57261d7bdcd3710d0d14005c"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1732e065704b47c9afca7ffa272f845300a4eb959276bf6970dc07265e73b605"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bcb1ebc3547619c3b58a39e2448af089ea2ef44b37988caf432447374941574e"}, + {file = "kiwisolver-1.4.8-cp310-cp310-win_amd64.whl", hash = "sha256:89c107041f7b27844179ea9c85d6da275aa55ecf28413e87624d033cf1f6b751"}, + {file = "kiwisolver-1.4.8-cp310-cp310-win_arm64.whl", hash = "sha256:b5773efa2be9eb9fcf5415ea3ab70fc785d598729fd6057bea38d539ead28271"}, + {file = "kiwisolver-1.4.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a4d3601908c560bdf880f07d94f31d734afd1bb71e96585cace0e38ef44c6d84"}, + {file = "kiwisolver-1.4.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:856b269c4d28a5c0d5e6c1955ec36ebfd1651ac00e1ce0afa3e28da95293b561"}, + {file = "kiwisolver-1.4.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c2b9a96e0f326205af81a15718a9073328df1173a2619a68553decb7097fd5d7"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5020c83e8553f770cb3b5fc13faac40f17e0b205bd237aebd21d53d733adb03"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:dace81d28c787956bfbfbbfd72fdcef014f37d9b48830829e488fdb32b49d954"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11e1022b524bd48ae56c9b4f9296bce77e15a2e42a502cceba602f804b32bb79"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b9b4d2892fefc886f30301cdd80debd8bb01ecdf165a449eb6e78f79f0fabd6"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a96c0e790ee875d65e340ab383700e2b4891677b7fcd30a699146f9384a2bb0"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23454ff084b07ac54ca8be535f4174170c1094a4cff78fbae4f73a4bcc0d4dab"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:87b287251ad6488e95b4f0b4a79a6d04d3ea35fde6340eb38fbd1ca9cd35bbbc"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b21dbe165081142b1232a240fc6383fd32cdd877ca6cc89eab93e5f5883e1c25"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:768cade2c2df13db52475bd28d3a3fac8c9eff04b0e9e2fda0f3760f20b3f7fc"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d47cfb2650f0e103d4bf68b0b5804c68da97272c84bb12850d877a95c056bd67"}, + {file = "kiwisolver-1.4.8-cp311-cp311-win_amd64.whl", hash = "sha256:ed33ca2002a779a2e20eeb06aea7721b6e47f2d4b8a8ece979d8ba9e2a167e34"}, + {file = "kiwisolver-1.4.8-cp311-cp311-win_arm64.whl", hash = "sha256:16523b40aab60426ffdebe33ac374457cf62863e330a90a0383639ce14bf44b2"}, + {file = "kiwisolver-1.4.8-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502"}, + {file = "kiwisolver-1.4.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31"}, + {file = "kiwisolver-1.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8"}, + {file = "kiwisolver-1.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50"}, + {file = "kiwisolver-1.4.8-cp312-cp312-win_arm64.whl", hash = "sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476"}, + {file = "kiwisolver-1.4.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09"}, + {file = "kiwisolver-1.4.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1"}, + {file = "kiwisolver-1.4.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb"}, + {file = "kiwisolver-1.4.8-cp313-cp313-win_amd64.whl", hash = "sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2"}, + {file = "kiwisolver-1.4.8-cp313-cp313-win_arm64.whl", hash = "sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e"}, + {file = 
"kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e7a019419b7b510f0f7c9dceff8c5eae2392037eae483a7f9162625233802b0a"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:286b18e86682fd2217a48fc6be6b0f20c1d0ed10958d8dc53453ad58d7be0bf8"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4191ee8dfd0be1c3666ccbac178c5a05d5f8d689bbe3fc92f3c4abec817f8fe0"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cd2785b9391f2873ad46088ed7599a6a71e762e1ea33e87514b1a441ed1da1c"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c07b29089b7ba090b6f1a669f1411f27221c3662b3a1b7010e67b59bb5a6f10b"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:65ea09a5a3faadd59c2ce96dc7bf0f364986a315949dc6374f04396b0d60e09b"}, + {file = "kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e"}, ] [[package]] @@ -3615,13 +3568,13 @@ test = ["coverage", "pytest", "pytest-cov"] [[package]] name = "litellm" -version = "1.55.9" +version = "1.55.12" description = "Library to easily interface with LLM API providers" optional = true python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8" files = [ - {file = "litellm-1.55.9-py3-none-any.whl", hash = "sha256:5ea931bee64535090d49a54e6b9842883fa6cabd6849c3c9674c12b166145da0"}, - {file = "litellm-1.55.9.tar.gz", hash = "sha256:861be3447552db32da05abff8af4945d1dd84df2f4b10985f97120dca5c07a42"}, + {file = "litellm-1.55.12-py3-none-any.whl", hash = "sha256:d034c3d6cdd5b8dd2c56f1100eb14f41f920b7673ac943d38fb2d7155ae5774b"}, + {file = "litellm-1.55.12.tar.gz", hash = "sha256:6d93025ebf29ae3d6929a90c44d648ba6804fdbc1bb1c3a72d7dddd08f6229dc"}, ] [package.dependencies] @@ -3634,7 +3587,6 @@ jsonschema = ">=4.22.0,<5.0.0" openai = ">=1.55.3" pydantic = ">=2.0.0,<3.0.0" 
python-dotenv = ">=0.2.0" -requests = ">=2.31.0,<3.0.0" tiktoken = ">=0.7.0" tokenizers = "*" @@ -4036,6 +3988,7 @@ python-versions = ">=3.7" files = [ {file = "milvus_lite-2.4.10-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:fc4246d3ed7d1910847afce0c9ba18212e93a6e9b8406048436940578dfad5cb"}, {file = "milvus_lite-2.4.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:74a8e07c5e3b057df17fbb46913388e84df1dc403a200f4e423799a58184c800"}, + {file = "milvus_lite-2.4.10-py3-none-manylinux2014_aarch64.whl", hash = "sha256:240c7386b747bad696ecb5bd1f58d491e86b9d4b92dccee3315ed7256256eddc"}, {file = "milvus_lite-2.4.10-py3-none-manylinux2014_x86_64.whl", hash = "sha256:211d2e334a043f9282bdd9755f76b9b2d93b23bffa7af240919ffce6a8dfe325"}, ] @@ -4343,17 +4296,17 @@ testing-docutils = ["pygments", "pytest (>=8,<9)", "pytest-param-files (>=0.6.0, [[package]] name = "narwhals" -version = "1.19.0" +version = "1.19.1" description = "Extremely lightweight compatibility layer between dataframe libraries" optional = false python-versions = ">=3.8" files = [ - {file = "narwhals-1.19.0-py3-none-any.whl", hash = "sha256:517eca140103dbf61e4513fe462885a06bc21b565521a5ac0b79a7e31f152efe"}, - {file = "narwhals-1.19.0.tar.gz", hash = "sha256:a1a03bf922548ed1da5426acc954327a94c02c3a08558b65a0937dbd3b77fb48"}, + {file = "narwhals-1.19.1-py3-none-any.whl", hash = "sha256:72476dcc95f1d3f2c0c1f047cdd4879346b2871fe13d1223025466d8a3dcaea4"}, + {file = "narwhals-1.19.1.tar.gz", hash = "sha256:e597e7ed9da42ffb2c80b21e174817b27592a8570edea8c46c90a726e3b796af"}, ] [package.extras] -cudf = ["cudf (>=23.08.00)"] +cudf = ["cudf (>=24.12.0)"] dask = ["dask[dataframe] (>=2024.7)"] dev = ["covdefaults", "dask[dataframe]", "duckdb", "hypothesis", "hypothesis[numpy]", "pandas", "polars", "pyarrow", "pyarrow-stubs", "pyspark", "pytest", "pytest-cov", "pytest-env", "pytest-randomly", "scikit-learn", "tqdm", "typing-extensions"] docs = ["black", "duckdb", "jinja2", "markdown-exec[ansi]", "mkdocs", "mkdocs-autorefs", "mkdocs-material", "mkdocstrings[python]", "pandas", "polars (>=1.0.0)", "pyarrow"] @@ -4445,13 +4398,13 @@ test = ["pep440", "pre-commit", "pytest", "testpath"] [[package]] name = "nbsphinx" -version = "0.9.5" +version = "0.9.6" description = "Jupyter Notebook Tools for Sphinx" optional = false python-versions = ">=3.6" files = [ - {file = "nbsphinx-0.9.5-py3-none-any.whl", hash = "sha256:d82f71084425db1f48e72515f15c25b4de8652ceaab513ee462ac05f1b8eae0a"}, - {file = "nbsphinx-0.9.5.tar.gz", hash = "sha256:736916e7b0dab28fc904f4a9ae3b53a9a50c29fccc6329c052fcc7485abcf2b7"}, + {file = "nbsphinx-0.9.6-py3-none-any.whl", hash = "sha256:336b0b557945a7678ec7449b16449f854bc852a435bb53b8a72e6b5dc740d992"}, + {file = "nbsphinx-0.9.6.tar.gz", hash = "sha256:c2b28a2d702f1159a95b843831798e86e60a17fc647b9bff9ba1585355de54e3"}, ] [package.dependencies] @@ -5354,43 +5307,31 @@ python-versions = ">=3.9" files = [ {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, - {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, - {file = 
"pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, - {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, - {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, - {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, - {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, - {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, - {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, {file = 
"pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, - {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, - {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, - {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, - {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, @@ -5960,20 +5901,20 @@ virtualenv = ">=20.10.0" [[package]] name = "primp" -version = "0.9.1" +version = "0.9.2" description = "HTTP client that can impersonate web browsers, mimicking their headers and `TLS/JA3/JA4/HTTP2` fingerprints" optional = true python-versions = ">=3.8" files = [ - {file = "primp-0.9.1-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:4204cd36454168c2fb50cfbc124eb08abc8dd54a70be31522aa0f83aec1c1df9"}, - {file = "primp-0.9.1-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:bdc7c77edd51e3da675f58a7c6c1e2d6c80a72bc519529143a27f45e24de7759"}, - {file = "primp-0.9.1-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb93dd24f1b96ebf94501a4e5d2ee74c53b7a24fd25fc72806453cdd62116094"}, - {file = "primp-0.9.1-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:035e134b33195e6a4b8e46ad56c929543ab758c3f373d5277017eb53ba60101d"}, - {file = "primp-0.9.1-cp38-abi3-manylinux_2_34_armv7l.whl", hash = "sha256:d38c29116c87d71951214fbd06b9f61c55a8ab0c62b7ea926565f8de7fbcdd87"}, - {file = "primp-0.9.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9eaa20ff6c0157aa9d2925decbf09fc915b2084fc0e8c2eeba125b2f99ac2a41"}, - {file = "primp-0.9.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:8b4fdc844e096025fa36c175390c298b71bd9495e9a1c4986afb198557793b54"}, - {file = "primp-0.9.1-cp38-abi3-win_amd64.whl", hash = "sha256:36996df75c24b8fca669041d0d636e84c8d67cad86c8e94ccb80884f08b2984b"}, - {file = "primp-0.9.1.tar.gz", hash = 
"sha256:961b4791b855d30673d68891c574928a585794c5c1fd4474e5429988d74866cc"}, + {file = "primp-0.9.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:a3179640e633be843ed5daba5c4e3086ad91f77c7bb40a9db06326f28d56b12b"}, + {file = "primp-0.9.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:94a5da8ba25f74152b43bc16a7591dfb5d7d30a5827dc0a0f96a956f7d3616be"}, + {file = "primp-0.9.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0668c0abb6d56fc8b0a918179b1d0f68e7267c1dc632e2b683c618317e13143f"}, + {file = "primp-0.9.2-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:a9c29a4b8eabfc28a1746d2fe93d33b9fcf2e81e642dd0e3eaecede60cc36b7d"}, + {file = "primp-0.9.2-cp38-abi3-manylinux_2_34_armv7l.whl", hash = "sha256:04d499308a101b06b40f5fda1bdc795db5731cd0dfbb1a8873f4acd07c085b1d"}, + {file = "primp-0.9.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:4cd5daf39034a0a8c96cdc0c4c306184c6f2b1b2a0b39dc3294d79ed28a6f7fe"}, + {file = "primp-0.9.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:8d14653434837eb431b3cf7ca006647d7a196906e48bba96bb600ba2ba70bcdc"}, + {file = "primp-0.9.2-cp38-abi3-win_amd64.whl", hash = "sha256:80d9f07564dc9b25b1a9676df770561418557c124fedecae84f6491a1974b61d"}, + {file = "primp-0.9.2.tar.gz", hash = "sha256:5b95666c25b9107eab3c05a89cb7b1748d5122e57c57b25bfc3249d525c45300"}, ] [package.extras] @@ -6236,6 +6177,7 @@ description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs optional = true python-versions = ">=3.8" files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, ] @@ -6246,6 +6188,7 @@ description = "A collection of ASN.1-based protocols modules" optional = true python-versions = ">=3.8" files = [ + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, ] @@ -6571,13 +6514,13 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] name = "pymilvus" -version = "2.5.0" +version = "2.5.2" description = "Python Sdk for Milvus" optional = true python-versions = ">=3.8" files = [ - {file = "pymilvus-2.5.0-py3-none-any.whl", hash = "sha256:a0e8653d8fe78019abfda79b3404ef7423f312501e8cbd7dc728051ce8732652"}, - {file = "pymilvus-2.5.0.tar.gz", hash = "sha256:4da14a3bd957a4921166f9355fd1f1ac5c5e4e80b46f12f64d9c9a6dcb8cb395"}, + {file = "pymilvus-2.5.2-py3-none-any.whl", hash = "sha256:a162a6a27c11322333dfddc37e055b3c76625032985bae2c985b7c7a0838fce7"}, + {file = "pymilvus-2.5.2.tar.gz", hash = "sha256:f54aa42f0ed10fa58dafa8c309e9b0add21abe8b1cda684a9560fc4960dfbe61"}, ] [package.dependencies] @@ -8292,13 +8235,13 @@ type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (>=1.12 [[package]] name = "sglang" -version = "0.4.0.post2" +version = "0.4.1" description = "SGLang is yet another fast serving framework for large language models and vision language models." 
optional = true python-versions = ">=3.8" files = [ - {file = "sglang-0.4.0.post2-py3-none-any.whl", hash = "sha256:67c13c603e35e81c93899cf5eeed2f2eee62950449ebf1e3d5f712b437c74e0c"}, - {file = "sglang-0.4.0.post2.tar.gz", hash = "sha256:32a61e1da1991d8d35deeec756dfe0392a983797be0ba32e05ef66d6b4b6d6df"}, + {file = "sglang-0.4.1-py3-none-any.whl", hash = "sha256:eec41a21d0d0abd492fba5f970dcd009a9d319d8e444b0e0b7aaec8d524f4fe2"}, + {file = "sglang-0.4.1.tar.gz", hash = "sha256:79db4073ba8ebc261d27c4a082c760ac52ae0d3bd91976dd516c6870cd5a6f30"}, ] [package.dependencies] @@ -8320,8 +8263,8 @@ dev-hpu = ["sglang[all-hpu]", "sglang[test]"] dev-xpu = ["sglang[all-xpu]", "sglang[test]"] litellm = ["litellm (>=1.0.0)"] openai = ["openai (>=1.0)", "tiktoken"] -runtime-common = ["aiohttp", "decord", "fastapi", "gemlite", "hf_transfer", "huggingface_hub", "interegular", "modelscope", "orjson", "outlines (>=0.0.44,<0.1.0)", "packaging", "pillow", "prometheus-client (>=0.20.0)", "psutil", "pydantic", "python-multipart", "pyzmq (>=25.1.2)", "torchao (>=0.7.0)", "uvicorn", "uvloop", "xgrammar (>=0.1.6)"] -srt = ["cuda-python", "flashinfer (==0.1.6)", "sglang[runtime-common]", "torch", "vllm (>=0.6.3.post1,<=0.6.4.post1)"] +runtime-common = ["aiohttp", "decord", "fastapi", "hf_transfer", "huggingface_hub", "interegular", "modelscope", "orjson", "outlines (>=0.0.44,<0.1.0)", "packaging", "pillow", "prometheus-client (>=0.20.0)", "psutil", "pydantic", "python-multipart", "pyzmq (>=25.1.2)", "torchao (>=0.7.0)", "uvicorn", "uvloop", "xgrammar (>=0.1.6)"] +srt = ["cuda-python", "flashinfer (==0.1.6)", "sgl-kernel (>=0.0.2.post8)", "sglang[runtime-common]", "torch", "vllm (>=0.6.3.post1,<=0.6.4.post1)"] srt-hip = ["sglang[runtime-common]", "torch", "vllm (==0.6.3.dev13)"] srt-hpu = ["sglang[runtime-common]"] srt-xpu = ["sglang[runtime-common]"] @@ -9279,13 +9222,13 @@ vision = ["Pillow (>=10.0.1,<=15.0)"] [[package]] name = "trio" -version = "0.27.0" +version = "0.28.0" description = "A friendly Python library for async concurrency and I/O" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "trio-0.27.0-py3-none-any.whl", hash = "sha256:68eabbcf8f457d925df62da780eff15ff5dc68fd6b367e2dde59f7aaf2a0b884"}, - {file = "trio-0.27.0.tar.gz", hash = "sha256:1dcc95ab1726b2da054afea8fd761af74bad79bd52381b84eae408e983c76831"}, + {file = "trio-0.28.0-py3-none-any.whl", hash = "sha256:56d58977acc1635735a96581ec70513cc781b8b6decd299c487d3be2a721cd94"}, + {file = "trio-0.28.0.tar.gz", hash = "sha256:4e547896fe9e8a5658e54e4c7c5fa1db748cbbbaa7c965e7d40505b928c73c05"}, ] [package.dependencies] @@ -9443,13 +9386,13 @@ urllib3 = ">=2" [[package]] name = "types-setuptools" -version = "75.6.0.20241126" +version = "75.6.0.20241223" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" files = [ - {file = "types_setuptools-75.6.0.20241126-py3-none-any.whl", hash = "sha256:aaae310a0e27033c1da8457d4d26ac673b0c8a0de7272d6d4708e263f2ea3b9b"}, - {file = "types_setuptools-75.6.0.20241126.tar.gz", hash = "sha256:7bf25ad4be39740e469f9268b6beddda6e088891fa5a27e985c6ce68bf62ace0"}, + {file = "types_setuptools-75.6.0.20241223-py3-none-any.whl", hash = "sha256:7cbfd3bf2944f88bbcdd321b86ddd878232a277be95d44c78a53585d78ebc2f6"}, + {file = "types_setuptools-75.6.0.20241223.tar.gz", hash = "sha256:d9478a985057ed48a994c707f548e55aababa85fe1c9b212f43ab5a1fffd3211"}, ] [[package]] @@ -10338,13 +10281,13 @@ propcache = ">=0.2.0" [[package]] name = "yt-dlp" -version = "2024.12.13" 
+version = "2024.12.23" description = "A feature-rich command-line audio/video downloader" optional = true python-versions = ">=3.9" files = [ - {file = "yt_dlp-2024.12.13-py3-none-any.whl", hash = "sha256:5a16b7511e8500cbb13ff0babc9c6deb1e049dc1c854a51738aad2529167fcdf"}, - {file = "yt_dlp-2024.12.13.tar.gz", hash = "sha256:77e15afb9d460ecb7294a39bb5e39dc9f4e8a65f3a37ef4db58800b94d095511"}, + {file = "yt_dlp-2024.12.23-py3-none-any.whl", hash = "sha256:2fc08a5221a0379628ac4e7324c6c69a95b9fdfa7a7ca3187444b3b7451e38be"}, + {file = "yt_dlp-2024.12.23.tar.gz", hash = "sha256:ac0e72b5a9017ba104b4258546201a7cedc38e8bd20727e0c63b77c829b425e9"}, ] [package.extras] From c0e6a01729776e3de8e642e11945ac5c8800dad5 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Sat, 28 Dec 2024 19:29:04 -0600 Subject: [PATCH 03/28] move functions into utils.py --- camel/agents/chat_agent.py | 350 +++-------------------------------- camel/agents/utils.py | 138 ++++++++++++++ camel/models/openai_model.py | 51 +++-- 3 files changed, 202 insertions(+), 337 deletions(-) create mode 100644 camel/agents/utils.py diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 38f653b266..17d03eeb44 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -15,8 +15,6 @@ import json import logging -import re -import textwrap from collections import defaultdict from typing import ( TYPE_CHECKING, @@ -30,8 +28,7 @@ Union, ) -from openai.types.chat import ChatCompletionMessageToolCall -from pydantic import BaseModel, ValidationError +from pydantic import BaseModel from camel.agents.base import BaseAgent from camel.memories import ( @@ -60,11 +57,6 @@ from camel.utils import ( get_model_encoding, ) -from camel.utils.commons import ( - func_string_to_callable, - get_pydantic_object_schema, - json_to_function_code, -) if TYPE_CHECKING: from openai import Stream @@ -86,75 +78,6 @@ from camel.utils import track_agent -def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str: - r"""Generates a tool prompt based on the provided tool schema list. - - Returns: - str: A string representing the tool prompt. - """ - tool_prompts = [] - - for tool in tool_schema_list: - tool_info = tool["function"] - tool_name = tool_info["name"] - tool_description = tool_info["description"] - tool_json = json.dumps(tool_info, indent=4) - - prompt = ( - f"Use the function '{tool_name}' to '{tool_description}':\n" - f"{tool_json}\n" - ) - tool_prompts.append(prompt) - - tool_prompt_str = "\n".join(tool_prompts) - - final_prompt = textwrap.dedent( - f"""\ - You have access to the following functions: - - {tool_prompt_str} - - If you choose to call a function ONLY reply in the following format with no prefix or suffix: - - {{"example_name": "example_value"}} - - Reminder: - - Function calls MUST follow the specified format, start with - - Required parameters MUST be specified - - Only call one function at a time - - Put the entire function call reply on one line - - If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls. - """ # noqa: E501 - ) - return final_prompt - - -def _parse_tool_response(self, response: str): - r"""Parses the tool response to extract the function name and - arguments. - - Args: - response (str): The response from the model containing the - function call. - - Returns: - Optional[Dict[str, Any]]: The parsed function name and arguments - if found, otherwise :obj:`None`. 
- """ - function_regex = r"(.*?)" - match = re.search(function_regex, response) - - if match: - function_name, args_string = match.groups() - try: - args = json.loads(args_string) - return {"function": function_name, "arguments": args} - except json.JSONDecodeError as error: - logger.error(f"Error parsing function arguments: {error}") - return None - return None - - class FunctionCallingRecord(BaseModel): r"""Historical records of functions called in the conversation. @@ -473,17 +396,6 @@ def step( return self._handle_step(response_format) - # def _inject_tool_prompt(self) -> None: - # r"""Generate and add the tool prompt to memory.""" - # tool_prompt = self._generate_tool_prompt( - # self.model_backend.model_config_dict["tools"] - # ) - # tool_msg = BaseMessage.make_assistant_message( - # role_name="Assistant", content=tool_prompt - # ) - # self.update_memory(tool_msg, OpenAIBackendRole.SYSTEM) - # self.tool_prompt_added = True - def _handle_step( self, response_format: Optional[Type[BaseModel]], @@ -507,38 +419,20 @@ def _handle_step( finish_reasons, usage_dict, response_id, - ) = self._step_model_response(openai_messages, num_tokens) - - # Try to parse structured output to return a Pydantic object - if response_format and isinstance(response, ChatCompletion): - content = response.choices[0].message.content - try: - json_content = json.loads(str(content)) - output_messages[0].parsed = response_format(**json_content) # type: ignore [assignment, misc] - except json.JSONDecodeError as e: - logger.error( - f"Failed in parsing the output into JSON: {e}" - ) - output_messages[0].parsed = None - except ValidationError as e: - logger.warning( - "Successfully generating JSON response, " - "but failed in parsing it into Pydantic object :" - f"{e}, return the JSON response in parsed field" - ) - output_messages[0].parsed = json_content + ) = self._step_model_response( + openai_messages, response_format, num_tokens + ) # Single-step mode if self.single_iteration: break # Handle tool requests - tool_request = self._extract_tool_call(response) - if isinstance(response, ChatCompletion) and tool_request: - response.choices[0].message.tool_calls = [tool_request] - tool_call_records.append( - self._step_tool_call_and_update(response) - ) + if ( + isinstance(response, ChatCompletion) + and response.choices[0].message.tool_calls + ): + tool_call_records.append(self._step_tool_call(response)) # Final info and response info = self._step_get_info( @@ -555,72 +449,6 @@ def _handle_step( msgs=output_messages, terminated=self.terminated, info=info ) - def _extract_tool_call( - self, response: Any - ) -> Optional[ChatCompletionMessageToolCall]: - r"""Extract the tool call from the model response, if present. - - Args: - response (Any): The model's response object. - - Returns: - Optional[ChatCompletionMessageToolCall]: The parsed tool call if - present, otherwise None. 
- """ - # # Check if the response contains tool calls - # if ( - # self.has_tools - # and not self.model_type.support_native_tool_calling - # and "" in response.choices[0].message.content - # ): - # parsed_content = self._parse_tool_response( - # response.choices[0].message.content - # ) - # if parsed_content: - # return ChatCompletionMessageToolCall( - # id=str(uuid.uuid4()), - # function=Function( - # arguments=str(parsed_content["arguments"]).replace( - # "'", '"' - # ), - # name=str(parsed_content["function"]), - # ), - # type="function", - # ) - # elif ( - # self.has_tools - # and self.model_type.support_native_tool_calling - # and response.choices[0].message.tool_calls - # ): - # return response.choices[0].message.tool_calls[0] - - # No tool call found - return None - - def _is_standard_response(self, response: Any) -> bool: - r"""Determine if the provided response is a standard reply without - tool calls. - - Args: - response (Any): The response object to evaluate. - - Returns: - bool: `True` if the response is a standard reply, `False` - otherwise. - """ - if not self.has_tools: - return True - - if not isinstance(response, ChatCompletion): - return True - - if self.model_type.support_native_tool_calling: - return response.choices[0].message.tool_calls is None - - return "" not in str( - response.choices[0].message.content or "" - ) - def _log_final_output(self, output_messages: List[BaseMessage]) -> None: r"""Log final messages or warnings about multiple responses.""" if len(output_messages) == 1: @@ -631,6 +459,7 @@ def _log_final_output(self, output_messages: List[BaseMessage]) -> None: "selected message manually using `record_message()`." ) + # TODO: Redesign this method async def step_async( self, input_message: Union[BaseMessage, str], @@ -679,7 +508,9 @@ async def step_async( finish_reasons, usage_dict, response_id, - ) = self._step_model_response(openai_messages, num_tokens) + ) = self._step_model_response( + openai_messages, response_format, num_tokens + ) if ( not self.has_tools @@ -690,23 +521,9 @@ async def step_async( # Normal function calling tool_call_records.append( - await self._step_tool_call_and_update_async(response) + await self._step_tool_call_async(response) ) - if ( - response_format is not None - and self.model_type.support_native_tool_calling - ): - ( - output_messages, - finish_reasons, - usage_dict, - response_id, - tool_call_record, - num_tokens, - ) = self._structure_output_with_function(response_format) - tool_call_records.append(tool_call_record) - info = self._step_get_info( output_messages, finish_reasons, @@ -730,122 +547,10 @@ async def step_async( msgs=output_messages, terminated=self.terminated, info=info ) - def _step_tool_call_and_update( - self, response: ChatCompletion - ) -> FunctionCallingRecord: - r"""Processes a function call within the chat completion response, - records the function call in the provided list of tool calls and - updates the memory of the current agent. - - Args: - response (ChatCompletion): The response object from the chat - completion. - - Returns: - FunctionCallingRecord: The record of calling the function. 
- """ - - # Perform function calling - func_assistant_msg, func_result_msg, tool_call_record = ( - self._step_tool_call(response) - ) - - # Update the messages - self.update_memory(func_assistant_msg, OpenAIBackendRole.ASSISTANT) - self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION) - - return tool_call_record - - async def _step_tool_call_and_update_async( - self, response: ChatCompletion - ) -> FunctionCallingRecord: - ( - func_assistant_msg, - func_result_msg, - func_record, - ) = await self.step_tool_call_async(response) - - self.update_memory(func_assistant_msg, OpenAIBackendRole.ASSISTANT) - self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION) - - return func_record - - # TODO: Simplify format -> Schema - def _structure_output_with_function( - self, response_format: Type[BaseModel] - ) -> Tuple[ - List[BaseMessage], - List[str], - Dict[str, int], - str, - FunctionCallingRecord, - int, - ]: - r"""Internal function of structuring the output of the agent based on - the given output schema. - - Args: - response_format (Type[BaseModel]): The output schema to use for - structuring the output. - - Returns: - Tuple[List[BaseMessage], List[str], Dict[str, int], str, - FunctionCallingRecord, int]: - A tuple containing the output messages, finish reasons, usage - dictionary, response ID, function calling record, and number of - tokens. - """ - from camel.toolkits import FunctionTool - - schema_json = get_pydantic_object_schema(response_format) - func_str = json_to_function_code(schema_json) - func_callable = func_string_to_callable(func_str) - func = FunctionTool(func_callable) - - original_model_dict = self.model_backend.model_config_dict - - # Replace the original tools with the structuring function - self._tools = [func] - self.model_backend.model_config_dict = original_model_dict.copy() - self.model_backend.model_config_dict["tools"] = [ - func.get_openai_tool_schema() - ] - self.model_backend.model_config_dict["tool_choice"] = "required" - - openai_messages, num_tokens = self.memory.get_context() - ( - response, - output_messages, - finish_reasons, - usage_dict, - response_id, - ) = self._step_model_response(openai_messages, num_tokens) - - if isinstance(response, ChatCompletion): - tool_call_record = self._step_tool_call_and_update(response) - else: - raise ValueError( - "Structured output is not supported for stream responses." - ) - - for base_message_item in output_messages: - base_message_item.content = json.dumps(tool_call_record.result) - - # Recover the original tools - self.model_backend.model_config_dict = original_model_dict - - return ( - output_messages, - finish_reasons, - usage_dict, - response_id, - tool_call_record, - num_tokens, - ) - def _step_model_response( self, openai_messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]], num_tokens: int, ) -> tuple[ Union[ChatCompletion, Stream], @@ -860,7 +565,9 @@ def _step_model_response( # Obtain the model's response for _ in range(len(self.model_backend.models)): try: - response = self.model_backend.run(openai_messages) + response = self.model_backend.run( + openai_messages, response_format, self.tool_schemas + ) break except Exception as exc: logger.error( @@ -1129,9 +836,7 @@ def _step_token_exceed( def _step_tool_call( self, response: ChatCompletion, - ) -> Tuple[ - FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord - ]: + ) -> FunctionCallingRecord: r"""Execute the function with arguments following the model's response. 
Args: @@ -1176,7 +881,10 @@ def _step_tool_call( func_record = FunctionCallingRecord( func_name=func_name, args=args, result=result ) - return assist_msg, func_msg, func_record + self.update_memory(assist_msg, OpenAIBackendRole.ASSISTANT) + self.update_memory(func_msg, OpenAIBackendRole.FUNCTION) + + return func_record def _safe_json_loads(self, arguments_str): # Replace Python types with their JSON equivalents @@ -1190,12 +898,10 @@ def _safe_json_loads(self, arguments_str): except json.JSONDecodeError as e: raise ValueError(f"Invalid JSON format: {e}") - async def step_tool_call_async( + async def _step_tool_call_async( self, response: ChatCompletion, - ) -> Tuple[ - FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord - ]: + ) -> FunctionCallingRecord: r"""Execute the async function with arguments following the model's response. @@ -1240,7 +946,11 @@ async def step_tool_call_async( func_record = FunctionCallingRecord( func_name=func_name, args=args, result=result ) - return assist_msg, func_msg, func_record + + self.update_memory(assist_msg, OpenAIBackendRole.ASSISTANT) + self.update_memory(func_msg, OpenAIBackendRole.FUNCTION) + + return func_record def get_usage_dict( self, output_messages: List[BaseMessage], prompt_tokens: int diff --git a/camel/agents/utils.py b/camel/agents/utils.py new file mode 100644 index 0000000000..50d149399a --- /dev/null +++ b/camel/agents/utils.py @@ -0,0 +1,138 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +import json +import logging +import re +import textwrap +import uuid +from typing import Any, Dict, List, Optional + +from openai.types.chat.chat_completion_message_tool_call import ( + ChatCompletionMessageToolCall, + Function, +) + +logger = logging.getLogger(__name__) + + +def generate_tool_prompt(tool_schema_list: List[Dict[str, Any]]) -> str: + r"""Generates a tool prompt based on the provided tool schema list. + + Returns: + str: A string representing the tool prompt. 
+ """ + tool_prompts = [] + + for tool in tool_schema_list: + tool_info = tool["function"] + tool_name = tool_info["name"] + tool_description = tool_info["description"] + tool_json = json.dumps(tool_info, indent=4) + + prompt = ( + f"Use the function '{tool_name}' to '{tool_description}':\n" + f"{tool_json}\n" + ) + tool_prompts.append(prompt) + + tool_prompt_str = "\n".join(tool_prompts) + + final_prompt = textwrap.dedent( + f"""\ + You have access to the following functions: + + {tool_prompt_str} + + If you choose to call a function ONLY reply in the following format with no prefix or suffix: + + {{"example_name": "example_value"}} + + Reminder: + - Function calls MUST follow the specified format, start with + - Required parameters MUST be specified + - Only call one function at a time + - Put the entire function call reply on one line + - If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls. + """ # noqa: E501 + ) + return final_prompt + + +def _parse_tool_response(response: str) -> Optional[Dict[str, Any]]: + r"""Parses the tool response to extract the function name and + arguments. + + Args: + response (str): The response from the model containing the + function call. + + Returns: + Optional[Dict[str, Any]]: The parsed function name and arguments + if found, otherwise :obj:`None`. + """ + function_regex = r"(.*?)" + match = re.search(function_regex, response) + + if match: + function_name, args_string = match.groups() + try: + args = json.loads(args_string) + return {"function": function_name, "arguments": args} + except json.JSONDecodeError as error: + logger.error(f"Error parsing function arguments: {error}") + return None + return None + + +def extract_tool_call( + self, response: Any +) -> Optional[ChatCompletionMessageToolCall]: + r"""Extract the tool call from the model response, if present. + + Args: + response (Any): The model's response object. + + Returns: + Optional[ChatCompletionMessageToolCall]: The parsed tool call if + present, otherwise None. + """ + # Check if the response contains tool calls + if ( + self.has_tools + and not self.model_type.support_native_tool_calling + and "" in response.choices[0].message.content + ): + parsed_content = _parse_tool_response( + response.choices[0].message.content + ) + if parsed_content: + return ChatCompletionMessageToolCall( + id=str(uuid.uuid4()), + function=Function( + arguments=str(parsed_content["arguments"]).replace( + "'", '"' + ), + name=str(parsed_content["function"]), + ), + type="function", + ) + elif ( + self.has_tools + and self.model_type.support_native_tool_calling + and response.choices[0].message.tool_calls + ): + return response.choices[0].message.tool_calls[0] + + # No tool call found + return None diff --git a/camel/models/openai_model.py b/camel/models/openai_model.py index 300a5b5921..f9451c6552 100644 --- a/camel/models/openai_model.py +++ b/camel/models/openai_model.py @@ -32,7 +32,7 @@ api_keys_required, ) -O1_UNSUPPORTED_PARAMS = [ +O1_UNSUPPORTED_PARAMS = { "temperature", "top_p", "presence_penalty", @@ -40,7 +40,7 @@ "logprobs", "top_logprobs", "logit_bias", -] +} class OpenAIModel(BaseModelBackend): @@ -133,38 +133,55 @@ def _run( in OpenAI API format. response_format (Optional[Type[BaseModel]]): The format of the response. - tools (Optional[List[str]]): The schema of the tools to use for the - request. + tools (Optional[List[Dict[str, Any]]]): The schema of the tools to + use for the request. 
Returns: Union[ChatCompletion, Stream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. """ + if response_format: + return self._request_parse(messages, response_format, tools) + else: + return self._request_chat_completion(messages, tools) + + def _request_chat_completion( + self, + messages: List[OpenAIMessage], + tools: Optional[List[Dict[str, Any]]], + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: request_config = self.model_config_dict.copy() - if tools: + if tools is not None: for tool in tools: function_dict = tool.get('function', {}) function_dict.pop("strict", None) - request_config["tools"] = tools + request_config["tools"] = tools - if response_format: - request_config["response_format"] = response_format - request_config.pop("stream", None) - response = self._client.beta.chat.completions.parse( - messages=messages, - model=self.model_type, - **request_config, - ) - return self._to_chat_completion(response) + return self._client.chat.completions.create( + messages=messages, + model=self.model_type, + **request_config, + ) + + def _request_parse( + self, + messages: List[OpenAIMessage], + response_format: Type[BaseModel], + tools: Optional[List[Dict[str, Any]]], + ) -> ChatCompletion: + request_config = self.model_config_dict.copy() + + request_config["response_format"] = response_format + request_config.pop("stream", None) + request_config["tools"] = tools - response = self._client.chat.completions.create( + return self._client.beta.chat.completions.parse( messages=messages, model=self.model_type, **request_config, ) - return response def check_model_config(self): r"""Check whether the model configuration contains any From 47f2c401e7676518418364d43328638063d21b58 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Mon, 30 Dec 2024 07:53:10 -0600 Subject: [PATCH 04/28] small fix --- camel/agents/chat_agent.py | 7 +- poetry.lock | 205 ++++++++++++++++++++----------------- 2 files changed, 115 insertions(+), 97 deletions(-) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 17d03eeb44..a42ee3ec2e 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -158,11 +158,12 @@ def __init__( token_limit: Optional[int] = None, output_language: Optional[str] = None, tools: Optional[List[Union[FunctionTool, Callable]]] = None, + external_tools: Optional[ + List[Union[FunctionTool, Callable, Dict[str, Any]]] + ] = None, response_terminators: Optional[List[ResponseTerminator]] = None, scheduling_strategy: str = "round_robin", single_iteration: bool = False, - # TODO: Remove this after refactoring - external_tools: Optional[List[Union[FunctionTool, Callable]]] = None, ) -> None: # Set up model backend self.model_backend = ModelManager( @@ -243,7 +244,7 @@ def tool_list(self) -> List[str]: return [tool.get_function_name() for tool in self._tools] @property - def tool_schemas(self) -> List[Dict]: + def tool_schemas(self) -> List[Dict[str, Any]]: return [tool.get_openai_tool_schema() for tool in self._tools] def add_tool(self, tool: Union[FunctionTool, Callable]) -> None: diff --git a/poetry.lock b/poetry.lock index 2b998d358e..bf54781f29 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2852,13 +2852,13 @@ files = [ [[package]] name = "identify" -version = "2.6.3" +version = "2.6.4" description = "File identification library for Python" optional = false python-versions = ">=3.9" files = [ - {file = "identify-2.6.3-py2.py3-none-any.whl", hash = 
"sha256:9edba65473324c2ea9684b1f944fe3191db3345e50b6d04571d10ed164f8d7bd"}, - {file = "identify-2.6.3.tar.gz", hash = "sha256:62f5dae9b5fef52c84cc188514e9ea4f3f636b1d8799ab5ebc475471f9e47a02"}, + {file = "identify-2.6.4-py2.py3-none-any.whl", hash = "sha256:993b0f01b97e0568c179bb9196391ff391bfb88a99099dbf5ce392b68f42d0af"}, + {file = "identify-2.6.4.tar.gz", hash = "sha256:285a7d27e397652e8cafe537a6cc97dd470a970f48fb2e9d979aa38eae5513ac"}, ] [package.extras] @@ -4035,15 +4035,18 @@ gcp = ["google-auth (==2.27.0)", "requests (>=2.32.3,<3.0.0)"] [[package]] name = "mistune" -version = "3.0.2" +version = "3.1.0" description = "A sane and fast Markdown parser with useful plugins and renderers" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "mistune-3.0.2-py3-none-any.whl", hash = "sha256:71481854c30fdbc938963d3605b72501f5c10a9320ecd412c121c163a1c7d205"}, - {file = "mistune-3.0.2.tar.gz", hash = "sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"}, + {file = "mistune-3.1.0-py3-none-any.whl", hash = "sha256:b05198cf6d671b3deba6c87ec6cf0d4eb7b72c524636eddb6dbf13823b52cee1"}, + {file = "mistune-3.1.0.tar.gz", hash = "sha256:dbcac2f78292b9dc066cd03b7a3a26b62d85f8159f2ea5fd28e55df79908d667"}, ] +[package.dependencies] +typing-extensions = {version = "*", markers = "python_version < \"3.11\""} + [[package]] name = "mock" version = "5.1.0" @@ -4308,20 +4311,21 @@ testing-docutils = ["pygments", "pytest (>=8,<9)", "pytest-param-files (>=0.6.0, [[package]] name = "narwhals" -version = "1.19.1" +version = "1.20.1" description = "Extremely lightweight compatibility layer between dataframe libraries" optional = false python-versions = ">=3.8" files = [ - {file = "narwhals-1.19.1-py3-none-any.whl", hash = "sha256:72476dcc95f1d3f2c0c1f047cdd4879346b2871fe13d1223025466d8a3dcaea4"}, - {file = "narwhals-1.19.1.tar.gz", hash = "sha256:e597e7ed9da42ffb2c80b21e174817b27592a8570edea8c46c90a726e3b796af"}, + {file = "narwhals-1.20.1-py3-none-any.whl", hash = "sha256:77fc10fed31534a4ecf0c5e1e091c91c454cb2fa73937f36be3fcb0c2dfdabc6"}, + {file = "narwhals-1.20.1.tar.gz", hash = "sha256:ffc6a44c1bc651531198c5f7fc38d349dff898ecfe51c1ef96aaaf429ec4dc19"}, ] [package.extras] -cudf = ["cudf (>=24.12.0)"] +cudf = ["cudf (>=24.10.0)"] dask = ["dask[dataframe] (>=2024.7)"] -dev = ["covdefaults", "dask[dataframe]", "duckdb", "hypothesis", "hypothesis[numpy]", "pandas", "polars", "pyarrow", "pyarrow-stubs", "pyspark", "pytest", "pytest-cov", "pytest-env", "pytest-randomly", "scikit-learn", "tqdm", "typing-extensions"] +dev = ["covdefaults", "duckdb", "hypothesis[numpy]", "pandas", "polars", "pre-commit", "pyarrow", "pyarrow-stubs", "pytest", "pytest-cov", "pytest-env", "pytest-randomly", "tqdm", "typing-extensions"] docs = ["black", "duckdb", "jinja2", "markdown-exec[ansi]", "mkdocs", "mkdocs-autorefs", "mkdocs-material", "mkdocstrings[python]", "pandas", "polars (>=1.0.0)", "pyarrow"] +extra = ["dask[dataframe]", "modin", "pyspark", "scikit-learn"] modin = ["modin"] pandas = ["pandas (>=0.25.3)"] polars = ["polars (>=0.20.3)"] @@ -5087,86 +5091,86 @@ opentelemetry-api = "1.27.0" [[package]] name = "orjson" -version = "3.10.12" +version = "3.10.13" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.12-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:ece01a7ec71d9940cc654c482907a6b65df27251255097629d0dea781f255c6d"}, - {file = "orjson-3.10.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c34ec9aebc04f11f4b978dd6caf697a2df2dd9b47d35aa4cc606cabcb9df69d7"}, - {file = "orjson-3.10.12-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fd6ec8658da3480939c79b9e9e27e0db31dffcd4ba69c334e98c9976ac29140e"}, - {file = "orjson-3.10.12-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f17e6baf4cf01534c9de8a16c0c611f3d94925d1701bf5f4aff17003677d8ced"}, - {file = "orjson-3.10.12-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6402ebb74a14ef96f94a868569f5dccf70d791de49feb73180eb3c6fda2ade56"}, - {file = "orjson-3.10.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0000758ae7c7853e0a4a6063f534c61656ebff644391e1f81698c1b2d2fc8cd2"}, - {file = "orjson-3.10.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:888442dcee99fd1e5bd37a4abb94930915ca6af4db50e23e746cdf4d1e63db13"}, - {file = "orjson-3.10.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c1f7a3ce79246aa0e92f5458d86c54f257fb5dfdc14a192651ba7ec2c00f8a05"}, - {file = "orjson-3.10.12-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:802a3935f45605c66fb4a586488a38af63cb37aaad1c1d94c982c40dcc452e85"}, - {file = "orjson-3.10.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1da1ef0113a2be19bb6c557fb0ec2d79c92ebd2fed4cfb1b26bab93f021fb885"}, - {file = "orjson-3.10.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a3273e99f367f137d5b3fecb5e9f45bcdbfac2a8b2f32fbc72129bbd48789c2"}, - {file = "orjson-3.10.12-cp310-none-win32.whl", hash = "sha256:475661bf249fd7907d9b0a2a2421b4e684355a77ceef85b8352439a9163418c3"}, - {file = "orjson-3.10.12-cp310-none-win_amd64.whl", hash = "sha256:87251dc1fb2b9e5ab91ce65d8f4caf21910d99ba8fb24b49fd0c118b2362d509"}, - {file = "orjson-3.10.12-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a734c62efa42e7df94926d70fe7d37621c783dea9f707a98cdea796964d4cf74"}, - {file = "orjson-3.10.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:750f8b27259d3409eda8350c2919a58b0cfcd2054ddc1bd317a643afc646ef23"}, - {file = "orjson-3.10.12-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb52c22bfffe2857e7aa13b4622afd0dd9d16ea7cc65fd2bf318d3223b1b6252"}, - {file = "orjson-3.10.12-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:440d9a337ac8c199ff8251e100c62e9488924c92852362cd27af0e67308c16ef"}, - {file = "orjson-3.10.12-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9e15c06491c69997dfa067369baab3bf094ecb74be9912bdc4339972323f252"}, - {file = "orjson-3.10.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:362d204ad4b0b8724cf370d0cd917bb2dc913c394030da748a3bb632445ce7c4"}, - {file = "orjson-3.10.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b57cbb4031153db37b41622eac67329c7810e5f480fda4cfd30542186f006ae"}, - {file = "orjson-3.10.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:165c89b53ef03ce0d7c59ca5c82fa65fe13ddf52eeb22e859e58c237d4e33b9b"}, - {file = "orjson-3.10.12-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:5dee91b8dfd54557c1a1596eb90bcd47dbcd26b0baaed919e6861f076583e9da"}, - {file = "orjson-3.10.12-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:77a4e1cfb72de6f905bdff061172adfb3caf7a4578ebf481d8f0530879476c07"}, - {file = "orjson-3.10.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:038d42c7bc0606443459b8fe2d1f121db474c49067d8d14c6a075bbea8bf14dd"}, - {file = "orjson-3.10.12-cp311-none-win32.whl", hash = "sha256:03b553c02ab39bed249bedd4abe37b2118324d1674e639b33fab3d1dafdf4d79"}, - {file = "orjson-3.10.12-cp311-none-win_amd64.whl", hash = "sha256:8b8713b9e46a45b2af6b96f559bfb13b1e02006f4242c156cbadef27800a55a8"}, - {file = "orjson-3.10.12-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:53206d72eb656ca5ac7d3a7141e83c5bbd3ac30d5eccfe019409177a57634b0d"}, - {file = "orjson-3.10.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac8010afc2150d417ebda810e8df08dd3f544e0dd2acab5370cfa6bcc0662f8f"}, - {file = "orjson-3.10.12-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed459b46012ae950dd2e17150e838ab08215421487371fa79d0eced8d1461d70"}, - {file = "orjson-3.10.12-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dcb9673f108a93c1b52bfc51b0af422c2d08d4fc710ce9c839faad25020bb69"}, - {file = "orjson-3.10.12-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22a51ae77680c5c4652ebc63a83d5255ac7d65582891d9424b566fb3b5375ee9"}, - {file = "orjson-3.10.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:910fdf2ac0637b9a77d1aad65f803bac414f0b06f720073438a7bd8906298192"}, - {file = "orjson-3.10.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:24ce85f7100160936bc2116c09d1a8492639418633119a2224114f67f63a4559"}, - {file = "orjson-3.10.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8a76ba5fc8dd9c913640292df27bff80a685bed3a3c990d59aa6ce24c352f8fc"}, - {file = "orjson-3.10.12-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ff70ef093895fd53f4055ca75f93f047e088d1430888ca1229393a7c0521100f"}, - {file = "orjson-3.10.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f4244b7018b5753ecd10a6d324ec1f347da130c953a9c88432c7fbc8875d13be"}, - {file = "orjson-3.10.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:16135ccca03445f37921fa4b585cff9a58aa8d81ebcb27622e69bfadd220b32c"}, - {file = "orjson-3.10.12-cp312-none-win32.whl", hash = "sha256:2d879c81172d583e34153d524fcba5d4adafbab8349a7b9f16ae511c2cee8708"}, - {file = "orjson-3.10.12-cp312-none-win_amd64.whl", hash = "sha256:fc23f691fa0f5c140576b8c365bc942d577d861a9ee1142e4db468e4e17094fb"}, - {file = "orjson-3.10.12-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:47962841b2a8aa9a258b377f5188db31ba49af47d4003a32f55d6f8b19006543"}, - {file = "orjson-3.10.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6334730e2532e77b6054e87ca84f3072bee308a45a452ea0bffbbbc40a67e296"}, - {file = "orjson-3.10.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:accfe93f42713c899fdac2747e8d0d5c659592df2792888c6c5f829472e4f85e"}, - {file = "orjson-3.10.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a7974c490c014c48810d1dede6c754c3cc46598da758c25ca3b4001ac45b703f"}, - {file = "orjson-3.10.12-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:3f250ce7727b0b2682f834a3facff88e310f52f07a5dcfd852d99637d386e79e"}, - {file = "orjson-3.10.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f31422ff9486ae484f10ffc51b5ab2a60359e92d0716fcce1b3593d7bb8a9af6"}, - {file = 
"orjson-3.10.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5f29c5d282bb2d577c2a6bbde88d8fdcc4919c593f806aac50133f01b733846e"}, - {file = "orjson-3.10.12-cp313-none-win32.whl", hash = "sha256:f45653775f38f63dc0e6cd4f14323984c3149c05d6007b58cb154dd080ddc0dc"}, - {file = "orjson-3.10.12-cp313-none-win_amd64.whl", hash = "sha256:229994d0c376d5bdc91d92b3c9e6be2f1fbabd4cc1b59daae1443a46ee5e9825"}, - {file = "orjson-3.10.12-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7d69af5b54617a5fac5c8e5ed0859eb798e2ce8913262eb522590239db6c6763"}, - {file = "orjson-3.10.12-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ed119ea7d2953365724a7059231a44830eb6bbb0cfead33fcbc562f5fd8f935"}, - {file = "orjson-3.10.12-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9c5fc1238ef197e7cad5c91415f524aaa51e004be5a9b35a1b8a84ade196f73f"}, - {file = "orjson-3.10.12-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43509843990439b05f848539d6f6198d4ac86ff01dd024b2f9a795c0daeeab60"}, - {file = "orjson-3.10.12-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f72e27a62041cfb37a3de512247ece9f240a561e6c8662276beaf4d53d406db4"}, - {file = "orjson-3.10.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a904f9572092bb6742ab7c16c623f0cdccbad9eeb2d14d4aa06284867bddd31"}, - {file = "orjson-3.10.12-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:855c0833999ed5dc62f64552db26f9be767434917d8348d77bacaab84f787d7b"}, - {file = "orjson-3.10.12-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:897830244e2320f6184699f598df7fb9db9f5087d6f3f03666ae89d607e4f8ed"}, - {file = "orjson-3.10.12-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:0b32652eaa4a7539f6f04abc6243619c56f8530c53bf9b023e1269df5f7816dd"}, - {file = "orjson-3.10.12-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:36b4aa31e0f6a1aeeb6f8377769ca5d125db000f05c20e54163aef1d3fe8e833"}, - {file = "orjson-3.10.12-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5535163054d6cbf2796f93e4f0dbc800f61914c0e3c4ed8499cf6ece22b4a3da"}, - {file = "orjson-3.10.12-cp38-none-win32.whl", hash = "sha256:90a5551f6f5a5fa07010bf3d0b4ca2de21adafbbc0af6cb700b63cd767266cb9"}, - {file = "orjson-3.10.12-cp38-none-win_amd64.whl", hash = "sha256:703a2fb35a06cdd45adf5d733cf613cbc0cb3ae57643472b16bc22d325b5fb6c"}, - {file = "orjson-3.10.12-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f29de3ef71a42a5822765def1febfb36e0859d33abf5c2ad240acad5c6a1b78d"}, - {file = "orjson-3.10.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de365a42acc65d74953f05e4772c974dad6c51cfc13c3240899f534d611be967"}, - {file = "orjson-3.10.12-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:91a5a0158648a67ff0004cb0df5df7dcc55bfc9ca154d9c01597a23ad54c8d0c"}, - {file = "orjson-3.10.12-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c47ce6b8d90fe9646a25b6fb52284a14ff215c9595914af63a5933a49972ce36"}, - {file = "orjson-3.10.12-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0eee4c2c5bfb5c1b47a5db80d2ac7aaa7e938956ae88089f098aff2c0f35d5d8"}, - {file = "orjson-3.10.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35d3081bbe8b86587eb5c98a73b97f13d8f9fea685cf91a579beddacc0d10566"}, - {file = "orjson-3.10.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash 
= "sha256:73c23a6e90383884068bc2dba83d5222c9fcc3b99a0ed2411d38150734236755"}, - {file = "orjson-3.10.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5472be7dc3269b4b52acba1433dac239215366f89dc1d8d0e64029abac4e714e"}, - {file = "orjson-3.10.12-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:7319cda750fca96ae5973efb31b17d97a5c5225ae0bc79bf5bf84df9e1ec2ab6"}, - {file = "orjson-3.10.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:74d5ca5a255bf20b8def6a2b96b1e18ad37b4a122d59b154c458ee9494377f80"}, - {file = "orjson-3.10.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ff31d22ecc5fb85ef62c7d4afe8301d10c558d00dd24274d4bbe464380d3cd69"}, - {file = "orjson-3.10.12-cp39-none-win32.whl", hash = "sha256:c22c3ea6fba91d84fcb4cda30e64aff548fcf0c44c876e681f47d61d24b12e6b"}, - {file = "orjson-3.10.12-cp39-none-win_amd64.whl", hash = "sha256:be604f60d45ace6b0b33dd990a66b4526f1a7a186ac411c942674625456ca548"}, - {file = "orjson-3.10.12.tar.gz", hash = "sha256:0a78bbda3aea0f9f079057ee1ee8a1ecf790d4f1af88dd67493c6b8ee52506ff"}, + {file = "orjson-3.10.13-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1232c5e873a4d1638ef957c5564b4b0d6f2a6ab9e207a9b3de9de05a09d1d920"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d26a0eca3035619fa366cbaf49af704c7cb1d4a0e6c79eced9f6a3f2437964b6"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d4b6acd7c9c829895e50d385a357d4b8c3fafc19c5989da2bae11783b0fd4977"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1884e53c6818686891cc6fc5a3a2540f2f35e8c76eac8dc3b40480fb59660b00"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a428afb5720f12892f64920acd2eeb4d996595bf168a26dd9190115dbf1130d"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba5b13b8739ce5b630c65cb1c85aedbd257bcc2b9c256b06ab2605209af75a2e"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cab83e67f6aabda1b45882254b2598b48b80ecc112968fc6483fa6dae609e9f0"}, + {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:62c3cc00c7e776c71c6b7b9c48c5d2701d4c04e7d1d7cdee3572998ee6dc57cc"}, + {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:dc03db4922e75bbc870b03fc49734cefbd50fe975e0878327d200022210b82d8"}, + {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:22f1c9a30b43d14a041a6ea190d9eca8a6b80c4beb0e8b67602c82d30d6eec3e"}, + {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b42f56821c29e697c68d7d421410d7c1d8f064ae288b525af6a50cf99a4b1200"}, + {file = "orjson-3.10.13-cp310-cp310-win32.whl", hash = "sha256:0dbf3b97e52e093d7c3e93eb5eb5b31dc7535b33c2ad56872c83f0160f943487"}, + {file = "orjson-3.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:46c249b4e934453be4ff2e518cd1adcd90467da7391c7a79eaf2fbb79c51e8c7"}, + {file = "orjson-3.10.13-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a36c0d48d2f084c800763473020a12976996f1109e2fcb66cfea442fdf88047f"}, + {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0065896f85d9497990731dfd4a9991a45b0a524baec42ef0a63c34630ee26fd6"}, + {file = 
"orjson-3.10.13-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92b4ec30d6025a9dcdfe0df77063cbce238c08d0404471ed7a79f309364a3d19"}, + {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a94542d12271c30044dadad1125ee060e7a2048b6c7034e432e116077e1d13d2"}, + {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3723e137772639af8adb68230f2aa4bcb27c48b3335b1b1e2d49328fed5e244c"}, + {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f00c7fb18843bad2ac42dc1ce6dd214a083c53f1e324a0fd1c8137c6436269b"}, + {file = "orjson-3.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0e2759d3172300b2f892dee85500b22fca5ac49e0c42cfff101aaf9c12ac9617"}, + {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ee948c6c01f6b337589c88f8e0bb11e78d32a15848b8b53d3f3b6fea48842c12"}, + {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:aa6fe68f0981fba0d4bf9cdc666d297a7cdba0f1b380dcd075a9a3dd5649a69e"}, + {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:dbcd7aad6bcff258f6896abfbc177d54d9b18149c4c561114f47ebfe74ae6bfd"}, + {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2149e2fcd084c3fd584881c7f9d7f9e5ad1e2e006609d8b80649655e0d52cd02"}, + {file = "orjson-3.10.13-cp311-cp311-win32.whl", hash = "sha256:89367767ed27b33c25c026696507c76e3d01958406f51d3a2239fe9e91959df2"}, + {file = "orjson-3.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:dca1d20f1af0daff511f6e26a27354a424f0b5cf00e04280279316df0f604a6f"}, + {file = "orjson-3.10.13-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a3614b00621c77f3f6487792238f9ed1dd8a42f2ec0e6540ee34c2d4e6db813a"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c976bad3996aa027cd3aef78aa57873f3c959b6c38719de9724b71bdc7bd14b"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f74d878d1efb97a930b8a9f9898890067707d683eb5c7e20730030ecb3fb930"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33ef84f7e9513fb13b3999c2a64b9ca9c8143f3da9722fbf9c9ce51ce0d8076e"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd2bcde107221bb9c2fa0c4aaba735a537225104173d7e19cf73f70b3126c993"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:064b9dbb0217fd64a8d016a8929f2fae6f3312d55ab3036b00b1d17399ab2f3e"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0044b0b8c85a565e7c3ce0a72acc5d35cda60793edf871ed94711e712cb637d"}, + {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7184f608ad563032e398f311910bc536e62b9fbdca2041be889afcbc39500de8"}, + {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d36f689e7e1b9b6fb39dbdebc16a6f07cbe994d3644fb1c22953020fc575935f"}, + {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54433e421618cd5873e51c0e9d0b9fb35f7bf76eb31c8eab20b3595bb713cd3d"}, + {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e1ba0c5857dd743438acecc1cd0e1adf83f0a81fee558e32b2b36f89e40cee8b"}, + {file = "orjson-3.10.13-cp312-cp312-win32.whl", hash = 
"sha256:a42b9fe4b0114b51eb5cdf9887d8c94447bc59df6dbb9c5884434eab947888d8"}, + {file = "orjson-3.10.13-cp312-cp312-win_amd64.whl", hash = "sha256:3a7df63076435f39ec024bdfeb4c9767ebe7b49abc4949068d61cf4857fa6d6c"}, + {file = "orjson-3.10.13-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2cdaf8b028a976ebab837a2c27b82810f7fc76ed9fb243755ba650cc83d07730"}, + {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48a946796e390cbb803e069472de37f192b7a80f4ac82e16d6eb9909d9e39d56"}, + {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7d64f1db5ecbc21eb83097e5236d6ab7e86092c1cd4c216c02533332951afc"}, + {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:711878da48f89df194edd2ba603ad42e7afed74abcd2bac164685e7ec15f96de"}, + {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:cf16f06cb77ce8baf844bc222dbcb03838f61d0abda2c3341400c2b7604e436e"}, + {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8257c3fb8dd7b0b446b5e87bf85a28e4071ac50f8c04b6ce2d38cb4abd7dff57"}, + {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d9c3a87abe6f849a4a7ac8a8a1dede6320a4303d5304006b90da7a3cd2b70d2c"}, + {file = "orjson-3.10.13-cp313-cp313-win32.whl", hash = "sha256:527afb6ddb0fa3fe02f5d9fba4920d9d95da58917826a9be93e0242da8abe94a"}, + {file = "orjson-3.10.13-cp313-cp313-win_amd64.whl", hash = "sha256:b5f7c298d4b935b222f52d6c7f2ba5eafb59d690d9a3840b7b5c5cda97f6ec5c"}, + {file = "orjson-3.10.13-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e49333d1038bc03a25fdfe11c86360df9b890354bfe04215f1f54d030f33c342"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:003721c72930dbb973f25c5d8e68d0f023d6ed138b14830cc94e57c6805a2eab"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:63664bf12addb318dc8f032160e0f5dc17eb8471c93601e8f5e0d07f95003784"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6066729cf9552d70de297b56556d14b4f49c8f638803ee3c90fd212fa43cc6af"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a1152e2761025c5d13b5e1908d4b1c57f3797ba662e485ae6f26e4e0c466388"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69b21d91c5c5ef8a201036d207b1adf3aa596b930b6ca3c71484dd11386cf6c3"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b12a63f48bb53dba8453d36ca2661f2330126d54e26c1661e550b32864b28ce3"}, + {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a5a7624ab4d121c7e035708c8dd1f99c15ff155b69a1c0affc4d9d8b551281ba"}, + {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:0fee076134398d4e6cb827002468679ad402b22269510cf228301b787fdff5ae"}, + {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ae537fcf330b3947e82c6ae4271e092e6cf16b9bc2cef68b14ffd0df1fa8832a"}, + {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f81b26c03f5fb5f0d0ee48d83cea4d7bc5e67e420d209cc1a990f5d1c62f9be0"}, + {file = "orjson-3.10.13-cp38-cp38-win32.whl", hash = "sha256:0bc858086088b39dc622bc8219e73d3f246fb2bce70a6104abd04b3a080a66a8"}, + {file = 
"orjson-3.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:3ca6f17467ebbd763f8862f1d89384a5051b461bb0e41074f583a0ebd7120e8e"}, + {file = "orjson-3.10.13-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:4a11532cbfc2f5752c37e84863ef8435b68b0e6d459b329933294f65fa4bda1a"}, + {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c96d2fb80467d1d0dfc4d037b4e1c0f84f1fe6229aa7fea3f070083acef7f3d7"}, + {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dda4ba4d3e6f6c53b6b9c35266788053b61656a716a7fef5c884629c2a52e7aa"}, + {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f998bbf300690be881772ee9c5281eb9c0044e295bcd4722504f5b5c6092ff"}, + {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1cc42ed75b585c0c4dc5eb53a90a34ccb493c09a10750d1a1f9b9eff2bd12"}, + {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b0f29d485411e3c13d79604b740b14e4e5fb58811743f6f4f9693ee6480a8f"}, + {file = "orjson-3.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:233aae4474078d82f425134bb6a10fb2b3fc5a1a1b3420c6463ddd1b6a97eda8"}, + {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e384e330a67cf52b3597ee2646de63407da6f8fc9e9beec3eaaaef5514c7a1c9"}, + {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4222881d0aab76224d7b003a8e5fdae4082e32c86768e0e8652de8afd6c4e2c1"}, + {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e400436950ba42110a20c50c80dff4946c8e3ec09abc1c9cf5473467e83fd1c5"}, + {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f47c9e7d224b86ffb086059cdcf634f4b3f32480f9838864aa09022fe2617ce2"}, + {file = "orjson-3.10.13-cp39-cp39-win32.whl", hash = "sha256:a9ecea472f3eb653e1c0a3d68085f031f18fc501ea392b98dcca3e87c24f9ebe"}, + {file = "orjson-3.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:5385935a73adce85cc7faac9d396683fd813566d3857fa95a0b521ef84a5b588"}, + {file = "orjson-3.10.13.tar.gz", hash = "sha256:eb9bfb14ab8f68d9d9492d4817ae497788a15fd7da72e14dfabc289c3bb088ec"}, ] [[package]] @@ -5319,31 +5323,43 @@ python-versions = ">=3.9" files = [ {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, {file = 
"pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, @@ -6558,6 +6574,7 @@ python-versions = ">=3.9" files = [ {file = "pymupdf-1.25.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:793f9f6d51029e97851c711b3f6d9fe912313d95a306fbe8b1866f301d0e2bd3"}, {file = "pymupdf-1.25.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:15e6f4013ad0a029a2221920f9d2081f56dc43259dabfdf5cad7fbf1cee4b5a7"}, + {file = "pymupdf-1.25.1-cp39-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a39afbd80381f43e30d6eb2ec4613f465f507ac2b76070abdd2da8724f32ef36"}, {file = "pymupdf-1.25.1-cp39-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b63f8e9e65b0bda48f9217efd4d2a8c6d7a739dd28baf460c1ae78439b9af489"}, {file = "pymupdf-1.25.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a687bd387589e30abd810a78a23341f57f43fa16a4d8d8c0b870bb6d89607343"}, {file = "pymupdf-1.25.1-cp39-abi3-win32.whl", hash = "sha256:fc7dbc1aa9e298a4c81084e389c9623c26fcaa232c71efaa073af150069e2221"}, @@ -8247,13 +8264,13 @@ type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (>=1.12 [[package]] name = "sglang" -version = "0.4.1.post1" +version = "0.4.1.post3" description = "SGLang is yet another fast serving framework for large language models and vision language models." 
optional = true python-versions = ">=3.8" files = [ - {file = "sglang-0.4.1.post1-py3-none-any.whl", hash = "sha256:84addac9ef78b1df32dc23b6fa943af68bc54a5245763a8ddb8635be7934cf9d"}, - {file = "sglang-0.4.1.post1.tar.gz", hash = "sha256:b2463febb5e67b812523b0f55a2c21da60bf13d393534af0e51702a3ba0eae80"}, + {file = "sglang-0.4.1.post3-py3-none-any.whl", hash = "sha256:22a49bbbd8399f3d4ec377f0b6eeb10bb4ae648702adb02f8af64e72f77b61f7"}, + {file = "sglang-0.4.1.post3.tar.gz", hash = "sha256:7f1feba3d69af9a0ba5826d081c29d32c9422099f546ce299d71b3317e973aec"}, ] [package.dependencies] @@ -9436,13 +9453,13 @@ files = [ [[package]] name = "types-pyyaml" -version = "6.0.12.20241221" +version = "6.0.12.20241230" description = "Typing stubs for PyYAML" optional = false python-versions = ">=3.8" files = [ - {file = "types_PyYAML-6.0.12.20241221-py3-none-any.whl", hash = "sha256:0657a4ff8411a030a2116a196e8e008ea679696b5b1a8e1a6aa8ebb737b34688"}, - {file = "types_pyyaml-6.0.12.20241221.tar.gz", hash = "sha256:4f149aa893ff6a46889a30af4c794b23833014c469cc57cbc3ad77498a58996f"}, + {file = "types_PyYAML-6.0.12.20241230-py3-none-any.whl", hash = "sha256:fa4d32565219b68e6dee5f67534c722e53c00d1cfc09c435ef04d7353e1e96e6"}, + {file = "types_pyyaml-6.0.12.20241230.tar.gz", hash = "sha256:7f07622dbd34bb9c8b264fe860a17e0efcad00d50b5f27e93984909d9363498c"}, ] [[package]] From 80f6587b18df6a60a53f0227855403e0022bd60c Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Wed, 1 Jan 2025 15:48:09 -0600 Subject: [PATCH 05/28] openai worked --- camel/agents/chat_agent.py | 40 +++++++++++----------------------- camel/configs/openai_config.py | 3 ++- camel/models/base_model.py | 3 ++- 3 files changed, 17 insertions(+), 29 deletions(-) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index a42ee3ec2e..4b078ad7df 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -267,16 +267,6 @@ def remove_tool(self, tool_name: str) -> bool: return True return False - @property - def has_tools(self) -> bool: - r"""Whether tool calling is enabled for this agent. - - Returns: - bool: Whether tool calling is enabled for this agent, determined - by whether the dictionary of tools is empty. 
- """ - return len(self._tools) > 0 - def update_memory( self, message: BaseMessage, role: OpenAIBackendRole ) -> None: @@ -395,14 +385,6 @@ def step( # Add user input to memory self.update_memory(input_message, OpenAIBackendRole.USER) - return self._handle_step(response_format) - - def _handle_step( - self, - response_format: Optional[Type[BaseModel]], - ) -> ChatAgentResponse: - r"""Handles a single or multi-step interaction.""" - # Record function calls made during the session tool_call_records: List[FunctionCallingRecord] = [] while True: @@ -424,7 +406,6 @@ def _handle_step( openai_messages, response_format, num_tokens ) - # Single-step mode if self.single_iteration: break @@ -434,6 +415,9 @@ def _handle_step( and response.choices[0].message.tool_calls ): tool_call_records.append(self._step_tool_call(response)) + continue + + break # Final info and response info = self._step_get_info( @@ -513,17 +497,19 @@ async def step_async( openai_messages, response_format, num_tokens ) + if self.single_iteration: + break + if ( - not self.has_tools - or not isinstance(response, ChatCompletion) - or not response.choices[0].message.tool_calls + isinstance(response, ChatCompletion) + and response.choices[0].message.tool_calls ): - break + tool_call_records.append( + await self._step_tool_call_async(response) + ) + continue - # Normal function calling - tool_call_records.append( - await self._step_tool_call_async(response) - ) + break info = self._step_get_info( output_messages, diff --git a/camel/configs/openai_config.py b/camel/configs/openai_config.py index 9397e98e17..b787a3bf0e 100644 --- a/camel/configs/openai_config.py +++ b/camel/configs/openai_config.py @@ -18,7 +18,6 @@ from pydantic import BaseModel, Field from camel.configs.base_config import BaseConfig -from camel.toolkits import FunctionTool class ChatGPTConfig(BaseConfig): @@ -120,6 +119,8 @@ def as_dict(self) -> Dict[str, Any]: Dict[str, Any]: A dictionary representation of the current configuration. 
""" + from camel.toolkits import FunctionTool + config_dict = self.model_dump() if self.tools: tools_schema = [] diff --git a/camel/models/base_model.py b/camel/models/base_model.py index 909b5deab1..c689933515 100644 --- a/camel/models/base_model.py +++ b/camel/models/base_model.py @@ -110,7 +110,8 @@ def run( self.model_config_dict.get("response_format", None) or response_format ) - tools = self.model_config_dict.get("tools", None) or tools + # If tools are empty, make it None + tools = self.model_config_dict.get("tools", None) or tools or None return self._run(messages, response_format, tools) @abstractmethod From 7544b5c35de909a7b8bba180a87d24ce4fd14219 Mon Sep 17 00:00:00 2001 From: liuxukun2000 Date: Fri, 3 Jan 2025 00:52:23 -0600 Subject: [PATCH 06/28] enable async in some modelbackend --- camel/models/anthropic_model.py | 35 +++++++++++- camel/models/azure_openai_model.py | 35 +++++++++++- camel/models/base_model.py | 40 ++++++++++++++ camel/models/cohere_model.py | 51 ++++++++++++++++++ camel/models/deepseek_model.py | 32 ++++++++++- camel/models/openai_audio_models.py | 8 ++- camel/models/openai_compatible_model.py | 33 +++++++++++- camel/models/openai_model.py | 71 ++++++++++++++++++++++++- 8 files changed, 299 insertions(+), 6 deletions(-) diff --git a/camel/models/anthropic_model.py b/camel/models/anthropic_model.py index f3601293db..5a3af6e34a 100644 --- a/camel/models/anthropic_model.py +++ b/camel/models/anthropic_model.py @@ -61,7 +61,7 @@ def __init__( url: Optional[str] = None, token_counter: Optional[BaseTokenCounter] = None, ) -> None: - from anthropic import Anthropic + from anthropic import Anthropic, AsyncAnthropic if model_config_dict is None: model_config_dict = AnthropicConfig().as_dict() @@ -71,6 +71,7 @@ def __init__( model_type, model_config_dict, api_key, url, token_counter ) self.client = Anthropic(api_key=self._api_key, base_url=self._url) + self.async_client = AsyncAnthropic(api_key=self._api_key, base_url=self._url) def _convert_response_from_anthropic_to_openai(self, response): # openai ^1.0.0 format, reference openai/types/chat/chat_completion.py @@ -158,6 +159,38 @@ def _run( return response + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ): + r"""Run inference of Anthropic chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + ChatCompletion: Response in the OpenAI API format. + """ + from anthropic import NOT_GIVEN + + if messages[0]["role"] == "system": + sys_msg = str(messages.pop(0)["content"]) + else: + sys_msg = NOT_GIVEN # type: ignore[assignment] + response = await self.async_client.messages.create( + model=self.model_type, + system=sys_msg, + messages=messages, # type: ignore[arg-type] + **self.model_config_dict, + ) + + # format response to openai format + response = self._convert_response_from_anthropic_to_openai(response) + + return response def check_model_config(self): r"""Check whether the model configuration is valid for anthropic model backends. 
diff --git a/camel/models/azure_openai_model.py b/camel/models/azure_openai_model.py index 8dced3f0da..b9600fa105 100644 --- a/camel/models/azure_openai_model.py +++ b/camel/models/azure_openai_model.py @@ -14,7 +14,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AzureOpenAI, Stream +from openai import AzureOpenAI, Stream, AsyncAzureOpenAI from pydantic import BaseModel from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig @@ -96,6 +96,15 @@ def __init__( max_retries=3, ) + self._async_client = AsyncAzureOpenAI( + azure_endpoint=str(self._url), + azure_deployment=self.azure_deployment_name, + api_version=self.api_version, + api_key=self._api_key, + timeout=180, + max_retries=3, + ) + @property def token_counter(self) -> BaseTokenCounter: r"""Initialize the token counter for the model backend. @@ -132,6 +141,30 @@ def _run( ) return response + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of Azure OpenAI chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. + """ + response = self._async_client.chat.completions.create( + messages=messages, + model=self.azure_deployment_name, # type:ignore[arg-type] + **self.model_config_dict, + ) + return response + def check_model_config(self): r"""Check whether the model configuration contains any unexpected arguments to Azure OpenAI API. diff --git a/camel/models/base_model.py b/camel/models/base_model.py index c689933515..92289f76fa 100644 --- a/camel/models/base_model.py +++ b/camel/models/base_model.py @@ -82,6 +82,16 @@ def _run( tools: Optional[List[Dict[str, Any]]], ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: pass + + # TODO: Add the async version of the run method + # @abstractmethod + def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + pass def run( self, @@ -114,6 +124,36 @@ def run( tools = self.model_config_dict.get("tools", None) or tools or None return self._run(messages, response_format, tools) + async def arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs the query to the backend model asynchronously. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + response_format (Optional[Type[BaseModel]]): The response format + to use for the model. (default: :obj:`None`) + tools (Optional[List[Tool]]): The schema of tools to use for the + model for this request. Will override the tools specified in + the model configuration (but not change the configuration). + (default: :obj:`None`) + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. 
+ """ + response_format = ( + self.model_config_dict.get("response_format", None) or response_format + ) + # If tools are empty, make it None + tools = self.model_config_dict.get("tools", None) or tools or None + return await self._arun(messages, response_format, tools) + @abstractmethod def check_model_config(self): r"""Check whether the input model configuration contains unexpected diff --git a/camel/models/cohere_model.py b/camel/models/cohere_model.py index 9e4e69d553..e328d37751 100644 --- a/camel/models/cohere_model.py +++ b/camel/models/cohere_model.py @@ -69,6 +69,7 @@ def __init__( model_type, model_config_dict, api_key, url, token_counter ) self._client = cohere.ClientV2(api_key=self._api_key) + self._async_client = cohere.AsyncClientV2(api_key=self._api) def _to_openai_response(self, response: 'ChatResponse') -> ChatCompletion: if response.usage and response.usage.tokens: @@ -267,6 +268,56 @@ def _run( return openai_response + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> ChatCompletion: + r"""Runs inference of Cohere chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + Returns: + ChatCompletion. + """ + from cohere.core.api_error import ApiError + + cohere_messages = self._to_cohere_chatmessage(messages) + + try: + response = await self._async_client.chat( + messages=cohere_messages, + model=self.model_type, + **self.model_config_dict, + ) + except ApiError as e: + logging.error(f"Cohere API Error: {e.status_code}") + logging.error(f"Error body: {e.body}") + raise + except Exception as e: + logging.error(f"Unexpected error when calling Cohere API: {e!s}") + raise + + openai_response = self._to_openai_response(response) + + # Add AgentOps LLM Event tracking + if LLMEvent: + llm_event = LLMEvent( + thread_id=openai_response.id, + prompt=" ".join( + [message.get("content") for message in messages] # type: ignore[misc] + ), + prompt_tokens=openai_response.usage.prompt_tokens, # type: ignore[union-attr] + completion=openai_response.choices[0].message.content, + completion_tokens=openai_response.usage.completion_tokens, # type: ignore[union-attr] + model=self.model_type, + ) + record(llm_event) + + return openai_response + def check_model_config(self): r"""Check whether the model configuration contains any unexpected arguments to Cohere API. diff --git a/camel/models/deepseek_model.py b/camel/models/deepseek_model.py index 42a8983e53..d1753375f8 100644 --- a/camel/models/deepseek_model.py +++ b/camel/models/deepseek_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import OpenAI, Stream, AsyncOpenAI from pydantic import BaseModel from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig @@ -81,6 +81,13 @@ def __init__( api_key=self._api_key, base_url=self._url, ) + + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key=self._api_key, + base_url=self._url, + ) @property def token_counter(self) -> BaseTokenCounter: @@ -120,6 +127,29 @@ def _run( ) return response + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of DeepSeek chat completion. 
+ + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletion + """ + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + return response def check_model_config(self): r"""Check whether the model configuration contains any unexpected arguments to DeepSeek API. diff --git a/camel/models/openai_audio_models.py b/camel/models/openai_audio_models.py index e4d05c8f95..ac2d5b0b35 100644 --- a/camel/models/openai_audio_models.py +++ b/camel/models/openai_audio_models.py @@ -14,7 +14,7 @@ import os from typing import Any, List, Optional, Union -from openai import OpenAI, _legacy_response +from openai import OpenAI, _legacy_response, AsyncOpenAI from camel.types import AudioModelType, VoiceType @@ -37,6 +37,12 @@ def __init__( base_url=self._url, api_key=self._api_key, ) + self._async_client = AsyncOpenAI( + timeout=120, + max_retries=3, + base_url=self._url, + api_key=self._api_key, + ) def text_to_speech( self, diff --git a/camel/models/openai_compatible_model.py b/camel/models/openai_compatible_model.py index 4e2da8855b..45c0c822aa 100644 --- a/camel/models/openai_compatible_model.py +++ b/camel/models/openai_compatible_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import OpenAI, Stream, AsyncOpenAI from pydantic import BaseModel from camel.messages import OpenAIMessage @@ -68,6 +68,13 @@ def __init__( base_url=self._url, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key=self._api_key, + base_url=self._url, + ) + def _run( self, messages: List[OpenAIMessage], @@ -92,6 +99,30 @@ def _run( ) return response + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of OpenAI chat completion in async mode. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. + """ + response = self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + return response + @property def token_counter(self) -> BaseTokenCounter: r"""Initialize the token counter for the model backend. 
diff --git a/camel/models/openai_model.py b/camel/models/openai_model.py index d5efc67041..b3d78abd15 100644 --- a/camel/models/openai_model.py +++ b/camel/models/openai_model.py @@ -15,7 +15,7 @@ import warnings from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import OpenAI, Stream, AsyncOpenAI from pydantic import BaseModel from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig @@ -92,6 +92,12 @@ def __init__( base_url=self._url, api_key=self._api_key, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + base_url=self._url, + api_key=self._api_key, + ) def _sanitize_model_config(self) -> None: """Sanitize the model configuration for O1 models.""" @@ -150,6 +156,32 @@ def _run( else: return self._request_chat_completion(messages, tools) + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of OpenAI chat completion in async mode. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + response_format (Optional[Type[BaseModel]]): The format of the + response. + tools (Optional[List[Dict[str, Any]]]): The schema of the tools to + use for the request. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. + """ + if response_format: + return await self._arequest_parse(messages, response_format, tools) + else: + return await self._arequest_chat_completion(messages, tools) + def _request_chat_completion( self, messages: List[OpenAIMessage], @@ -169,6 +201,25 @@ def _request_chat_completion( **request_config, ) + async def _arequest_chat_completion( + self, + messages: List[OpenAIMessage], + tools: Optional[List[Dict[str, Any]]], + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + request_config = self.model_config_dict.copy() + + if tools is not None: + for tool in tools: + function_dict = tool.get('function', {}) + function_dict.pop("strict", None) + request_config["tools"] = tools + + return await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **request_config, + ) + def _request_parse( self, messages: List[OpenAIMessage], @@ -187,6 +238,24 @@ def _request_parse( **request_config, ) + async def _arequest_parse( + self, + messages: List[OpenAIMessage], + response_format: Type[BaseModel], + tools: Optional[List[Dict[str, Any]]], + ) -> ChatCompletion: + request_config = self.model_config_dict.copy() + + request_config["response_format"] = response_format + request_config.pop("stream", None) + request_config["tools"] = tools + + return await self._async_client.beta.chat.completions.parse( + messages=messages, + model=self.model_type, + **request_config, + ) + def check_model_config(self): r"""Check whether the model configuration contains any unexpected arguments to OpenAI API. 
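With `arun` now defined on the base backend and `_arun` implemented for the OpenAI, Azure OpenAI, Anthropic, Cohere, DeepSeek and OpenAI-compatible models, callers can await completions and fan out requests concurrently. A hedged usage sketch, assuming an already-constructed, non-streaming backend instance (construction, credentials and the questions are illustrative):

import asyncio
from typing import List

from camel.messages import OpenAIMessage
from camel.models import BaseModelBackend


async def ask(model: BaseModelBackend, question: str) -> str:
    messages: List[OpenAIMessage] = [{"role": "user", "content": question}]
    # arun() mirrors run(): same arguments, but awaitable.
    response = await model.arun(messages)
    # Assumes non-stream mode, so the result is a ChatCompletion.
    return response.choices[0].message.content or ""


async def main(model: BaseModelBackend) -> None:
    # The async path makes it cheap to issue several requests at once.
    answers = await asyncio.gather(
        ask(model, "What is tool calling?"),
        ask(model, "Name one benefit of async inference."),
    )
    print(answers)

# asyncio.run(main(model)) once a backend instance `model` is available.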
From ffe13c970062a4ff38c037e48603235186c53301 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Fri, 3 Jan 2025 17:08:56 -0600 Subject: [PATCH 07/28] tool refactored --- camel/agents/chat_agent.py | 426 ++++++++++++++++++++----------------- 1 file changed, 230 insertions(+), 196 deletions(-) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 4b078ad7df..410d80788e 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -23,12 +23,13 @@ Dict, List, Optional, - Tuple, + Set, Type, Union, ) -from pydantic import BaseModel +from openai import Stream +from pydantic import BaseModel, ConfigDict from camel.agents.base import BaseAgent from camel.memories import ( @@ -49,18 +50,15 @@ from camel.types import ( ChatCompletion, ChatCompletionChunk, + Choice, ModelPlatformType, ModelType, OpenAIBackendRole, RoleType, ) -from camel.utils import ( - get_model_encoding, -) +from camel.utils import get_model_encoding if TYPE_CHECKING: - from openai import Stream - from camel.terminators import ResponseTerminator @@ -78,6 +76,43 @@ from camel.utils import track_agent +def _convert_to_function_tool( + tool: Union[FunctionTool, Callable], +) -> FunctionTool: + return tool if isinstance(tool, FunctionTool) else FunctionTool(tool) + + +def _convert_to_schema( + tool: Union[FunctionTool, Callable, Dict[str, Any]], +) -> Dict[str, Any]: + if isinstance(tool, FunctionTool): + return tool.get_openai_tool_schema() + elif callable(tool): + return FunctionTool(tool).get_openai_tool_schema() + else: + return tool + + +class _ToolCallRequest(BaseModel): + r"""The request for tool calling.""" + + func_name: str + args: Dict[str, Any] + + +class _ModelResponse(BaseModel): + r"""The response from the model.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + response: Union[ChatCompletion, Stream] + tool_call_request: Optional[_ToolCallRequest] + output_messages: List[BaseMessage] + finish_reasons: List[str] + usage_dict: Dict[str, Any] + response_id: str + + class FunctionCallingRecord(BaseModel): r"""Historical records of functions called in the conversation. @@ -210,14 +245,19 @@ def __init__( ) # Set up tools - self._tools = ( - [ - tool if isinstance(tool, FunctionTool) else FunctionTool(tool) - for tool in tools + self._internal_tools = { + tool.get_function_name(): tool + for tool in [ + _convert_to_function_tool(tool) for tool in (tools or []) ] - if tools - else [] - ) + } + + self._external_tool_schemas = { + tool_schema["name"]: tool_schema + for tool_schema in [ + _convert_to_schema(tool) for tool in (external_tools or []) + ] + } # Set up other properties self.terminated = False @@ -237,33 +277,57 @@ def system_message(self) -> Optional[BaseMessage]: @property def tool_dict(self) -> Dict[str, FunctionTool]: - return {tool.get_function_name(): tool for tool in self._tools} + return self._internal_tools - @property - def tool_list(self) -> List[str]: - return [tool.get_function_name() for tool in self._tools] + def _get_full_tool_schemas(self) -> List[Dict[str, Any]]: + r"""Returns a list of tool schemas of all tools, including internal + and external tools. 
+ """ + return list(self._external_tool_schemas.values()) + [ + func_tool.get_openai_tool_schema() + for func_tool in self._internal_tools.values() + ] - @property - def tool_schemas(self) -> List[Dict[str, Any]]: - return [tool.get_openai_tool_schema() for tool in self._tools] + def _get_external_tool_names(self) -> Set[str]: + r"""Returns a set of external tool names.""" + return set(self._external_tool_schemas.keys()) def add_tool(self, tool: Union[FunctionTool, Callable]) -> None: r"""Add a tool to the agent.""" - new_tool = ( - tool if isinstance(tool, FunctionTool) else FunctionTool(tool) - ) - self._tools.append(new_tool) + new_tool = _convert_to_function_tool(tool) + self._internal_tools[new_tool.get_function_name()] = new_tool + + def add_external_tool( + self, tool: Union[FunctionTool, Callable, Dict[str, Any]] + ) -> None: + new_tool_schema = _convert_to_schema(tool) + self._external_tool_schemas[new_tool_schema["name"]] = new_tool_schema def remove_tool(self, tool_name: str) -> bool: r"""Remove a tool from the agent by name. + Args: + tool_name (str): The name of the tool to remove. + Returns: bool: Whether the tool was successfully removed. """ - for tool in self._tools: - if tool.get_function_name() != tool_name: - continue - self._tools.remove(tool) + if tool_name in self._internal_tools: + del self._internal_tools[tool_name] + return True + return False + + def remove_external_tool(self, tool_name: str) -> bool: + r"""Remove an external tool from the agent by name. + + Args: + tool_name (str): The name of the tool to remove. + + Returns: + bool: Whether the tool was successfully removed. + """ + if tool_name in self._external_tool_schemas: + del self._external_tool_schemas[tool_name] return True return False @@ -286,6 +350,9 @@ def _update_system_message_for_output_language(self) -> None: language determines the language in which the output text should be generated. """ + if not self._output_language: + return + language_prompt = ( "\nRegardless of the input language, " f"you must output text in {self._output_language}." 
@@ -301,7 +368,7 @@ def _update_system_message_for_output_language(self) -> None: content=language_prompt, ) - def get_info( + def _get_info_dict( self, session_id: Optional[str], usage: Optional[Dict[str, int]], @@ -396,42 +463,40 @@ def step( ) # Process model response - ( - response, - output_messages, - finish_reasons, - usage_dict, - response_id, - ) = self._step_model_response( + response = self._get_model_response( openai_messages, response_format, num_tokens ) if self.single_iteration: break + # TODO: return with external tools + # Handle tool requests - if ( - isinstance(response, ChatCompletion) - and response.choices[0].message.tool_calls - ): - tool_call_records.append(self._step_tool_call(response)) + if response.tool_call_request: + tool_call_records.append( + self._execute_tool(response.tool_call_request) + ) continue break # Final info and response info = self._step_get_info( - output_messages, - finish_reasons, - usage_dict, - response_id, + response.output_messages, + response.finish_reasons, + response.usage_dict, + response.response_id, tool_call_records, num_tokens, ) - self._log_final_output(output_messages) + self._log_final_output(response.output_messages) + return ChatAgentResponse( - msgs=output_messages, terminated=self.terminated, info=info + msgs=response.output_messages, + terminated=self.terminated, + info=info, ) def _log_final_output(self, output_messages: List[BaseMessage]) -> None: @@ -487,42 +552,33 @@ async def step_async( e.args[1], tool_call_records, "max_tokens_exceeded" ) - ( - response, - output_messages, - finish_reasons, - usage_dict, - response_id, - ) = self._step_model_response( + response = self._get_model_response( openai_messages, response_format, num_tokens ) if self.single_iteration: break - if ( - isinstance(response, ChatCompletion) - and response.choices[0].message.tool_calls - ): + if tool_call_request := response.tool_call_request: tool_call_records.append( - await self._step_tool_call_async(response) + await self._execute_tool_async(tool_call_request) ) continue break info = self._step_get_info( - output_messages, - finish_reasons, - usage_dict, - response_id, + response.output_messages, + response.finish_reasons, + response.usage_dict, + response.response_id, tool_call_records, num_tokens, ) - if len(output_messages) == 1: + if len(response.output_messages) == 1: # Auto record if the output result is a single message - self.record_message(output_messages[0]) + self.record_message(response.output_messages[0]) else: logger.warning( "Multiple messages returned in `step()`, message won't be " @@ -531,39 +587,31 @@ async def step_async( ) return ChatAgentResponse( - msgs=output_messages, terminated=self.terminated, info=info + msgs=response.output_messages, + terminated=self.terminated, + info=info, ) - def _step_model_response( + def _get_model_response( self, openai_messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]], num_tokens: int, - ) -> tuple[ - Union[ChatCompletion, Stream], - List[BaseMessage], - List[str], - Dict[str, int], - str, - ]: + ) -> _ModelResponse: r"""Internal function for agent step model response.""" response = None - # Obtain the model's response - for _ in range(len(self.model_backend.models)): - try: - response = self.model_backend.run( - openai_messages, response_format, self.tool_schemas - ) - break - except Exception as exc: - logger.error( - f"An error occurred while running model " - f"{self.model_backend.model_type}, " - f"index: {self.model_backend.current_model_index}", - 
exc_info=exc, - ) - continue + try: + response = self.model_backend.run( + openai_messages, response_format, self._get_full_tool_schemas() + ) + except Exception as exc: + logger.error( + f"An error occurred while running model " + f"{self.model_backend.model_type}, " + f"index: {self.model_backend.current_model_index}", + exc_info=exc, + ) if not response: raise ModelProcessingError( "Unable to process messages: none of the provided models " @@ -577,20 +625,9 @@ def _step_model_response( ) if isinstance(response, ChatCompletion): - output_messages, finish_reasons, usage_dict, response_id = ( - self.handle_batch_response(response) - ) + return self._handle_batch_response(response) else: - output_messages, finish_reasons, usage_dict, response_id = ( - self.handle_stream_response(response, num_tokens) - ) - return ( - response, - output_messages, - finish_reasons, - usage_dict, - response_id, - ) + return self._handle_stream_response(response, num_tokens) def _step_get_info( self, @@ -648,59 +685,40 @@ def _step_get_info( if self.terminated and termination_reason is not None: finish_reasons = [termination_reason] * len(finish_reasons) - info = self.get_info( + return self._get_info_dict( response_id, usage_dict, finish_reasons, num_tokens, tool_calls, ) - return info - def handle_batch_response( + def _handle_batch_response( self, response: ChatCompletion - ) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]: + ) -> _ModelResponse: r"""Process a batch response from the model and extract the necessary information. Args: - response (dict): Model response. + response (ChatCompletion): Model response. Returns: - tuple: A tuple of list of output `ChatMessage`, list of - finish reasons, usage dictionary, and response id. + _ModelResponse: parsed model response. 
""" output_messages: List[BaseMessage] = [] for choice in response.choices: + meta_dict = {} + if logprobs_info := self._handle_logprobs(choice): + meta_dict["logprobs_info"] = logprobs_info + chat_message = BaseMessage( role_name=self.role_name, role_type=self.role_type, - meta_dict=dict(), + meta_dict=meta_dict, content=choice.message.content or "", parsed=getattr(choice.message, "parsed", None), ) - # Process log probabilities, append to the message meta information - if choice.logprobs is not None: - tokens_logprobs = choice.logprobs.content - - if tokens_logprobs is not None: - # Extract and structure logprob information - logprobs_info = [ - { - "token": token_logprob.token, - "logprob": token_logprob.logprob, - "top_logprobs": [ - (top_logprob.token, top_logprob.logprob) - for top_logprob in token_logprob.top_logprobs - ], - } - for token_logprob in tokens_logprobs - ] - # Ensure meta_dict exists before adding logprobs info - if chat_message.meta_dict is None: - chat_message.meta_dict = {} - chat_message.meta_dict["logprobs_info"] = logprobs_info - # Append the processed chat message to output + output_messages.append(chat_message) finish_reasons = [ @@ -711,12 +729,48 @@ def handle_batch_response( if response.usage is not None else {} ) - return ( - output_messages, - finish_reasons, - usage, - response.id, - ) + + tool_call_request = None + if tool_calls := response.choices[0].message.tool_calls: + func_name = tool_calls[0].function.name + args = json.loads(tool_calls[0].function.arguments) + tool_call_request = _ToolCallRequest( + func_name=func_name, args=args + ) + + return _ModelResponse( + response=response, + tool_call_request=tool_call_request, + output_messages=output_messages, + finish_reasons=finish_reasons, + usage_dict=usage, + response_id=response.id, + ) + + def _handle_logprobs( + self, choice: Choice + ) -> Optional[List[Dict[str, Any]]]: + # Process log probabilities, append to the message meta information + if choice.logprobs is None: + return None + + tokens_logprobs = choice.logprobs.content + + if tokens_logprobs is None: + return None + + # Extract and structure logprob information + return [ + { + "token": token_logprob.token, + "logprob": token_logprob.logprob, + "top_logprobs": [ + (top_logprob.token, top_logprob.logprob) + for top_logprob in token_logprob.top_logprobs + ], + } + for token_logprob in tokens_logprobs + ] def _safe_model_dump(self, obj) -> dict: r"""Safely dump a Pydantic model to a dictionary. @@ -739,11 +793,11 @@ def _safe_model_dump(self, obj) -> dict: else: raise TypeError("The object is not a Pydantic model") - def handle_stream_response( + def _handle_stream_response( self, response: Stream[ChatCompletionChunk], prompt_tokens: int, - ) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]: + ) -> _ModelResponse: r"""Process a stream response from the model and extract the necessary information. @@ -752,8 +806,7 @@ def handle_stream_response( prompt_tokens (int): Number of input prompt tokens. Returns: - tuple: A tuple of list of output `ChatMessage`, list of - finish reasons, usage dictionary, and response id. + _ModelResponse: a parsed model response. 
""" content_dict: defaultdict = defaultdict(lambda: "") finish_reasons_dict: defaultdict = defaultdict(lambda: "") @@ -782,7 +835,16 @@ def handle_stream_response( finish_reasons_dict[i] for i in range(len(finish_reasons_dict)) ] usage_dict = self.get_usage_dict(output_messages, prompt_tokens) - return output_messages, finish_reasons, usage_dict, response_id + + # TODO: Handle tool calls + return _ModelResponse( + response=response, + tool_call_request=None, + output_messages=output_messages, + finish_reasons=finish_reasons, + usage_dict=usage_dict, + response_id=response_id, + ) def _step_token_exceed( self, @@ -804,9 +866,8 @@ def _step_token_exceed( information about token number and called functions. """ self.terminated = True - output_messages: List[BaseMessage] = [] - info = self.get_info( + info = self._get_info_dict( None, None, [termination_reason], @@ -815,36 +876,28 @@ def _step_token_exceed( ) return ChatAgentResponse( - msgs=output_messages, + msgs=[], terminated=self.terminated, info=info, ) - def _step_tool_call( + def _execute_tool( self, - response: ChatCompletion, + tool_call_request: _ToolCallRequest, ) -> FunctionCallingRecord: - r"""Execute the function with arguments following the model's response. + r"""Execute the tool with arguments following the model's response. Args: - response (Dict[str, Any]): The response obtained by calling the - model. + tool_call_request (_ToolCallRequest): The tool call request. Returns: - tuple: A tuple consisting of two obj:`FunctionCallingMessage`, - one about the arguments and the other about the execution - result, and a struct for logging information about this + FunctionCallingRecord: A struct for logging information about this function call. """ - choice = response.choices[0] - if choice.message.tool_calls is None: - raise RuntimeError("Tool call is None") - func_name = choice.message.tool_calls[0].function.name + func_name = tool_call_request.func_name + args = tool_call_request.args - arguments_str = choice.message.tool_calls[0].function.arguments - args = self._safe_json_loads(arguments_str) - - tool = self.tool_dict[func_name] + tool = self._internal_tools[func_name] result = tool(**args) assist_msg = FunctionCallingMessage( @@ -864,52 +917,33 @@ def _step_tool_call( result=result, ) + self.update_memory(assist_msg, OpenAIBackendRole.ASSISTANT) + self.update_memory(func_msg, OpenAIBackendRole.FUNCTION) + # Record information about this function call func_record = FunctionCallingRecord( func_name=func_name, args=args, result=result ) - self.update_memory(assist_msg, OpenAIBackendRole.ASSISTANT) - self.update_memory(func_msg, OpenAIBackendRole.FUNCTION) return func_record - def _safe_json_loads(self, arguments_str): - # Replace Python types with their JSON equivalents - arguments_str = arguments_str.replace("None", "null") - arguments_str = arguments_str.replace("True", "true") - arguments_str = arguments_str.replace("False", "false") - - # Attempt to parse the corrected string - try: - return json.loads(arguments_str) - except json.JSONDecodeError as e: - raise ValueError(f"Invalid JSON format: {e}") - - async def _step_tool_call_async( + async def _execute_tool_async( self, - response: ChatCompletion, + tool_call_request: _ToolCallRequest, ) -> FunctionCallingRecord: - r"""Execute the async function with arguments following the model's + r"""Execute the async tool with arguments following the model's response. Args: - response (Dict[str, Any]): The response obtained by calling the - model. 
+ tool_call_request (_ToolCallRequest): The tool call request. Returns: - tuple: A tuple consisting of two obj:`FunctionCallingMessage`, - one about the arguments and the other about the execution - result, and a struct for logging information about this + FunctionCallingRecord: A struct for logging information about this function call. """ - # Note that when function calling is enabled, `n` is set to 1. - choice = response.choices[0] - if choice.message.tool_calls is None: - raise RuntimeError("Tool call is None") - func_name = choice.message.tool_calls[0].function.name - - args = json.loads(choice.message.tool_calls[0].function.arguments) - tool = self.tool_dict[func_name] + func_name = tool_call_request.func_name + args = tool_call_request.args + tool = self._internal_tools[func_name] result = await tool(**args) assist_msg = FunctionCallingMessage( @@ -929,14 +963,14 @@ async def _step_tool_call_async( result=result, ) + self.update_memory(assist_msg, OpenAIBackendRole.ASSISTANT) + self.update_memory(func_msg, OpenAIBackendRole.FUNCTION) + # Record information about this function call func_record = FunctionCallingRecord( func_name=func_name, args=args, result=result ) - self.update_memory(assist_msg, OpenAIBackendRole.ASSISTANT) - self.update_memory(func_msg, OpenAIBackendRole.FUNCTION) - return func_record def get_usage_dict( From 2e0cf7cfbd73192838e374a0201869e5359aa934 Mon Sep 17 00:00:00 2001 From: liuxukun2000 Date: Mon, 6 Jan 2025 00:07:49 -0600 Subject: [PATCH 08/28] aadd async run --- camel/models/anthropic_model.py | 5 ++++- camel/models/azure_openai_model.py | 2 +- camel/models/base_model.py | 5 +++-- camel/models/deepseek_model.py | 5 +++-- camel/models/openai_audio_models.py | 2 +- camel/models/openai_compatible_model.py | 2 +- camel/models/openai_model.py | 2 +- 7 files changed, 14 insertions(+), 9 deletions(-) diff --git a/camel/models/anthropic_model.py b/camel/models/anthropic_model.py index 5a3af6e34a..296dc33e90 100644 --- a/camel/models/anthropic_model.py +++ b/camel/models/anthropic_model.py @@ -71,7 +71,9 @@ def __init__( model_type, model_config_dict, api_key, url, token_counter ) self.client = Anthropic(api_key=self._api_key, base_url=self._url) - self.async_client = AsyncAnthropic(api_key=self._api_key, base_url=self._url) + self.async_client = AsyncAnthropic( + api_key=self._api_key, base_url=self._url + ) def _convert_response_from_anthropic_to_openai(self, response): # openai ^1.0.0 format, reference openai/types/chat/chat_completion.py @@ -191,6 +193,7 @@ async def _arun( response = self._convert_response_from_anthropic_to_openai(response) return response + def check_model_config(self): r"""Check whether the model configuration is valid for anthropic model backends. 
diff --git a/camel/models/azure_openai_model.py b/camel/models/azure_openai_model.py index b9600fa105..d603221f51 100644 --- a/camel/models/azure_openai_model.py +++ b/camel/models/azure_openai_model.py @@ -14,7 +14,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AzureOpenAI, Stream, AsyncAzureOpenAI +from openai import AsyncAzureOpenAI, AzureOpenAI, Stream from pydantic import BaseModel from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig diff --git a/camel/models/base_model.py b/camel/models/base_model.py index 92289f76fa..f42bbe8d5c 100644 --- a/camel/models/base_model.py +++ b/camel/models/base_model.py @@ -82,7 +82,7 @@ def _run( tools: Optional[List[Dict[str, Any]]], ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: pass - + # TODO: Add the async version of the run method # @abstractmethod def _arun( @@ -148,7 +148,8 @@ async def arun( `Stream[ChatCompletionChunk]` in the stream mode. """ response_format = ( - self.model_config_dict.get("response_format", None) or response_format + self.model_config_dict.get("response_format", None) + or response_format ) # If tools are empty, make it None tools = self.model_config_dict.get("tools", None) or tools or None diff --git a/camel/models/deepseek_model.py b/camel/models/deepseek_model.py index d1753375f8..de1a0c6cc1 100644 --- a/camel/models/deepseek_model.py +++ b/camel/models/deepseek_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream, AsyncOpenAI +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig @@ -81,7 +81,7 @@ def __init__( api_key=self._api_key, base_url=self._url, ) - + self._async_client = AsyncOpenAI( timeout=180, max_retries=3, @@ -150,6 +150,7 @@ async def _arun( **self.model_config_dict, ) return response + def check_model_config(self): r"""Check whether the model configuration contains any unexpected arguments to DeepSeek API. 
diff --git a/camel/models/openai_audio_models.py b/camel/models/openai_audio_models.py index ac2d5b0b35..5ecc3e33c5 100644 --- a/camel/models/openai_audio_models.py +++ b/camel/models/openai_audio_models.py @@ -14,7 +14,7 @@ import os from typing import Any, List, Optional, Union -from openai import OpenAI, _legacy_response, AsyncOpenAI +from openai import AsyncOpenAI, OpenAI, _legacy_response from camel.types import AudioModelType, VoiceType diff --git a/camel/models/openai_compatible_model.py b/camel/models/openai_compatible_model.py index 45c0c822aa..e4565e8fee 100644 --- a/camel/models/openai_compatible_model.py +++ b/camel/models/openai_compatible_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream, AsyncOpenAI +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.messages import OpenAIMessage diff --git a/camel/models/openai_model.py b/camel/models/openai_model.py index b3d78abd15..6ad2bbe75e 100644 --- a/camel/models/openai_model.py +++ b/camel/models/openai_model.py @@ -15,7 +15,7 @@ import warnings from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream, AsyncOpenAI +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig From b397824398af82f7953cab5928d9143db24106ab Mon Sep 17 00:00:00 2001 From: liuxukun2000 Date: Thu, 9 Jan 2025 14:45:19 -0600 Subject: [PATCH 09/28] add _async run in models --- camel/models/azure_openai_model.py | 2 +- camel/models/base_model.py | 9 +- camel/models/gemini_model.py | 32 +++- camel/models/groq_model.py | 33 +++- camel/models/litellm_model.py | 3 + camel/models/mistral_model.py | 3 + camel/models/nemotron_model.py | 28 +++- camel/models/nvidia_model.py | 39 ++++- camel/models/ollama_model.py | 43 +++++ camel/models/openai_compatible_model.py | 2 +- camel/models/qwen_model.py | 32 +++- camel/models/reka_model.py | 46 +++++- camel/models/samba_model.py | 203 +++++++++++++++++++++++- camel/models/sglang_model.py | 46 +++++- camel/models/stub_model.py | 37 +++++ camel/models/togetherai_model.py | 34 +++- camel/models/vllm_model.py | 33 +++- camel/models/yi_model.py | 32 +++- camel/models/zhipuai_model.py | 34 +++- 19 files changed, 673 insertions(+), 18 deletions(-) diff --git a/camel/models/azure_openai_model.py b/camel/models/azure_openai_model.py index d603221f51..0b26769550 100644 --- a/camel/models/azure_openai_model.py +++ b/camel/models/azure_openai_model.py @@ -158,7 +158,7 @@ async def _arun( `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. """ - response = self._async_client.chat.completions.create( + response = await self._async_client.chat.completions.create( messages=messages, model=self.azure_deployment_name, # type:ignore[arg-type] **self.model_config_dict, diff --git a/camel/models/base_model.py b/camel/models/base_model.py index f42bbe8d5c..4c7b009852 100644 --- a/camel/models/base_model.py +++ b/camel/models/base_model.py @@ -12,7 +12,7 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= from abc import ABC, abstractmethod -from typing import Any, Dict, List, Optional, Type, Union +from typing import Any, Coroutine, Dict, List, Optional, Type, Union from openai import Stream from pydantic import BaseModel @@ -83,14 +83,15 @@ def _run( ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: pass - # TODO: Add the async version of the run method - # @abstractmethod + @abstractmethod def _arun( self, messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]], tools: Optional[List[Dict[str, Any]]], - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Coroutine[ + Any, Any, Union[ChatCompletion, Stream[ChatCompletionChunk]] + ]: pass def run( diff --git a/camel/models/gemini_model.py b/camel/models/gemini_model.py index 8bc9982c95..07d9f4c790 100644 --- a/camel/models/gemini_model.py +++ b/camel/models/gemini_model.py @@ -14,7 +14,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.configs import Gemini_API_PARAMS, GeminiConfig @@ -82,6 +82,12 @@ def __init__( api_key=self._api_key, base_url=self._url, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key=self._api_key, + base_url=self._url, + ) def _run( self, @@ -107,6 +113,30 @@ def _run( ) return response + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of Gemini chat completion asynchronously. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. + """ + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + return response + @property def token_counter(self) -> BaseTokenCounter: r"""Initialize the token counter for the model backend. diff --git a/camel/models/groq_model.py b/camel/models/groq_model.py index 7a3115b4bb..4616bf8c2f 100644 --- a/camel/models/groq_model.py +++ b/camel/models/groq_model.py @@ -14,7 +14,7 @@ import os from typing import Any, Dict, List, Optional, Union -from openai import OpenAI, Stream +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.configs import GROQ_API_PARAMS, GroqConfig @@ -80,6 +80,12 @@ def __init__( api_key=self._api_key, base_url=self._url, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key=self._api_key, + base_url=self._url, + ) @property def token_counter(self) -> BaseTokenCounter: @@ -120,6 +126,31 @@ def _run( return response + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of OpenAI chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. 
+ """ + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + + return response + def check_model_config(self): r"""Check whether the model configuration contains any unexpected arguments to Groq API. But Groq API does not have any additional diff --git a/camel/models/litellm_model.py b/camel/models/litellm_model.py index e3550cd80c..f5acf950e4 100644 --- a/camel/models/litellm_model.py +++ b/camel/models/litellm_model.py @@ -108,6 +108,9 @@ def token_counter(self) -> BaseTokenCounter: self._token_counter = LiteLLMTokenCounter(self.model_type) return self._token_counter + async def _arun(self) -> None: # type: ignore[override] + raise NotImplementedError + def _run( self, messages: List[OpenAIMessage], diff --git a/camel/models/mistral_model.py b/camel/models/mistral_model.py index 634de5cd3e..598cc59598 100644 --- a/camel/models/mistral_model.py +++ b/camel/models/mistral_model.py @@ -207,6 +207,9 @@ def token_counter(self) -> BaseTokenCounter: ) return self._token_counter + async def _arun(self) -> None: # type: ignore[override] + raise NotImplementedError("Mistral does not support async inference.") + def _run( self, messages: List[OpenAIMessage], diff --git a/camel/models/nemotron_model.py b/camel/models/nemotron_model.py index d181488161..8cdfe7168e 100644 --- a/camel/models/nemotron_model.py +++ b/camel/models/nemotron_model.py @@ -14,7 +14,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI +from openai import AsyncOpenAI, OpenAI from pydantic import BaseModel from camel.messages import OpenAIMessage @@ -63,6 +63,32 @@ def __init__( base_url=self._url, api_key=self._api_key, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + base_url=self._url, + api_key=self._api_key, + ) + + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> ChatCompletion: + r"""Runs inference of OpenAI chat completion asynchronously. + + Args: + messages (List[OpenAIMessage]): Message list. + + Returns: + ChatCompletion. + """ + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + ) + return response def _run( self, diff --git a/camel/models/nvidia_model.py b/camel/models/nvidia_model.py index ada246016f..57d617b26c 100644 --- a/camel/models/nvidia_model.py +++ b/camel/models/nvidia_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import AsyncOpenAI, OpenAI, Stream from openai.types.chat import ( ChatCompletion, ChatCompletionChunk, @@ -77,6 +77,43 @@ def __init__( api_key=self._api_key, base_url=self._url, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key=self._api_key, + base_url=self._url, + ) + + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of NVIDIA chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. 
+ """ + + # Remove tool-related parameters if no tools are specified + config = dict(self.model_config_dict) + if not config.get("tools"): # None or empty list + config.pop("tools", None) + config.pop("tool_choice", None) + + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **config, + ) + return response def _run( self, diff --git a/camel/models/ollama_model.py b/camel/models/ollama_model.py index e8de2d95d2..8d5f88f037 100644 --- a/camel/models/ollama_model.py +++ b/camel/models/ollama_model.py @@ -76,6 +76,12 @@ def __init__( api_key="Set-but-ignored", # required but ignored base_url=self._url, ) + self._async_client = OpenAI( + timeout=180, + max_retries=3, + api_key="Set-but-ignored", # required but ignored + base_url=self._url, + ) def _start_server(self) -> None: r"""Starts the Ollama server in a subprocess.""" @@ -120,6 +126,43 @@ def check_model_config(self): "input into Ollama model backend." ) + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of OpenAI chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. + """ + if self.model_config_dict.get("response_format"): + # stream is not supported in beta.chat.completions.parse + if "stream" in self.model_config_dict: + del self.model_config_dict["stream"] + + response = self._async_client.beta.chat.completions.parse( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + + return self._to_chat_completion(response) + + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + return response + def _run( self, messages: List[OpenAIMessage], diff --git a/camel/models/openai_compatible_model.py b/camel/models/openai_compatible_model.py index e4565e8fee..a74f335d3f 100644 --- a/camel/models/openai_compatible_model.py +++ b/camel/models/openai_compatible_model.py @@ -116,7 +116,7 @@ async def _arun( `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. """ - response = self._async_client.chat.completions.create( + response = await self._async_client.chat.completions.create( messages=messages, model=self.model_type, **self.model_config_dict, diff --git a/camel/models/qwen_model.py b/camel/models/qwen_model.py index 58badd8178..4275376328 100644 --- a/camel/models/qwen_model.py +++ b/camel/models/qwen_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.configs import QWEN_API_PARAMS, QwenConfig @@ -82,6 +82,36 @@ def __init__( api_key=self._api_key, base_url=self._url, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key=self._api_key, + base_url=self._url, + ) + + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of Qwen chat completion. 
+ + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. + """ + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + return response def _run( self, diff --git a/camel/models/reka_model.py b/camel/models/reka_model.py index d69e800053..4dae7dd34b 100644 --- a/camel/models/reka_model.py +++ b/camel/models/reka_model.py @@ -72,7 +72,7 @@ def __init__( url: Optional[str] = None, token_counter: Optional[BaseTokenCounter] = None, ) -> None: - from reka.client import Reka + from reka.client import AsyncReka, Reka if model_config_dict is None: model_config_dict = RekaConfig().as_dict() @@ -82,6 +82,9 @@ def __init__( model_type, model_config_dict, api_key, url, token_counter ) self._client = Reka(api_key=self._api_key, base_url=self._url) + self._async_client = AsyncReka( + api_key=self._api_key, base_url=self._url + ) def _convert_reka_to_openai_response( self, response: 'ChatResponse' @@ -177,6 +180,47 @@ def token_counter(self) -> BaseTokenCounter: ) return self._token_counter + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> ChatCompletion: + r"""Runs inference of Mistral chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + ChatCompletion. + """ + reka_messages = self._convert_openai_to_reka_messages(messages) + + response = await self._async_client.chat.create( + messages=reka_messages, + model=self.model_type, + **self.model_config_dict, + ) + + openai_response = self._convert_reka_to_openai_response(response) + + # Add AgentOps LLM Event tracking + if LLMEvent: + llm_event = LLMEvent( + thread_id=openai_response.id, + prompt=" ".join( + [message.get("content") for message in messages] # type: ignore[misc] + ), + prompt_tokens=openai_response.usage.input_tokens, # type: ignore[union-attr] + completion=openai_response.choices[0].message.content, + completion_tokens=openai_response.usage.output_tokens, # type: ignore[union-attr] + model=self.model_type, + ) + record(llm_event) + + return openai_response + def _run( self, messages: List[OpenAIMessage], diff --git a/camel/models/samba_model.py b/camel/models/samba_model.py index 85a3c66d58..f89ed11929 100644 --- a/camel/models/samba_model.py +++ b/camel/models/samba_model.py @@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Type, Union import httpx -from openai import OpenAI, Stream +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.configs import ( @@ -106,6 +106,12 @@ def __init__( base_url=self._url, api_key=self._api_key, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + base_url=self._url, + api_key=self._api_key, + ) @property def token_counter(self) -> BaseTokenCounter: @@ -149,6 +155,30 @@ def check_model_config(self): " SambaNova service" ) + async def _arun( # type: ignore[misc] + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs SambaNova's service. 
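+
+        The call removes any ``tools`` entry from the model config (tool
+        calling is not supported here) and then dispatches to the
+        streaming or non-streaming helper depending on the ``stream``
+        flag in the model config.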
+ + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. + """ + if "tools" in self.model_config_dict: + del self.model_config_dict["tools"] + if self.model_config_dict.get("stream") is True: + return await self._arun_streaming(messages) + else: + return await self._arun_non_streaming(messages) + def _run( # type: ignore[misc] self, messages: List[OpenAIMessage], @@ -402,3 +432,174 @@ def stream(self) -> bool: bool: Whether the model is in stream mode. """ return self.model_config_dict.get('stream', False) + + async def _arun_streaming( + self, messages: List[OpenAIMessage] + ) -> Stream[ChatCompletionChunk]: + r"""Handles streaming inference with SambaNova's API. + + Args: + messages (List[OpenAIMessage]): A list of messages representing the + chat history in OpenAI API format. + + Returns: + Stream[ChatCompletionChunk]: A generator yielding + `ChatCompletionChunk` objects as they are received from the + API. + + Raises: + RuntimeError: If the HTTP request fails. + ValueError: If the API doesn't support stream mode. + """ + # Handle SambaNova's Cloud API + if self._url == "https://api.sambanova.ai/v1": + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + + # Add AgentOps LLM Event tracking + if LLMEvent: + llm_event = LLMEvent( + thread_id=response.id, + prompt=" ".join( + [message.get("content") for message in messages] # type: ignore[misc] + ), + prompt_tokens=response.usage.prompt_tokens, # type: ignore[union-attr] + completion=response.choices[0].message.content, + completion_tokens=response.usage.completion_tokens, # type: ignore[union-attr] + model=self.model_type, + ) + record(llm_event) + + return response + + elif self._url == "https://sambaverse.sambanova.ai/api/predict": + raise ValueError( + "https://sambaverse.sambanova.ai/api/predict doesn't support" + " stream mode" + ) + raise RuntimeError(f"Unknown URL: {self._url}") + + async def _arun_non_streaming( + self, messages: List[OpenAIMessage] + ) -> ChatCompletion: + r"""Handles non-streaming inference with SambaNova's API. + + Args: + messages (List[OpenAIMessage]): A list of messages representing the + message in OpenAI API format. + + Returns: + ChatCompletion: A `ChatCompletion` object containing the complete + response from the API. + + Raises: + RuntimeError: If the HTTP request fails. + ValueError: If the JSON response cannot be decoded or is missing + expected data. 
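+
+        Example:
+            Illustrative sketch (assumes the SambaNova Cloud endpoint is
+            configured and ``stream`` is disabled)::
+
+                completion = await model._arun_non_streaming(messages)
+                print(completion.choices[0].message.content)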
+ """ + # Handle SambaNova's Cloud API + if self._url == "https://api.sambanova.ai/v1": + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + + # Add AgentOps LLM Event tracking + if LLMEvent: + llm_event = LLMEvent( + thread_id=response.id, + prompt=" ".join( + [message.get("content") for message in messages] # type: ignore[misc] + ), + prompt_tokens=response.usage.prompt_tokens, # type: ignore[union-attr] + completion=response.choices[0].message.content, + completion_tokens=response.usage.completion_tokens, # type: ignore[union-attr] + model=self.model_type, + ) + record(llm_event) + + return response + + # Handle SambaNova's Sambaverse API + else: + headers = { + "Content-Type": "application/json", + "key": str(self._api_key), + "modelName": self.model_type, + } + + data = { + "instance": json.dumps( + { + "conversation_id": str(uuid.uuid4()), + "messages": messages, + } + ), + "params": { + "do_sample": {"type": "bool", "value": "true"}, + "max_tokens_to_generate": { + "type": "int", + "value": str(self.model_config_dict.get("max_tokens")), + }, + "process_prompt": {"type": "bool", "value": "true"}, + "repetition_penalty": { + "type": "float", + "value": str( + self.model_config_dict.get("repetition_penalty") + ), + }, + "return_token_count_only": { + "type": "bool", + "value": "false", + }, + "select_expert": { + "type": "str", + "value": self.model_type.split("/")[1], + }, + "stop_sequences": { + "type": "str", + "value": self.model_config_dict.get("stop_sequences"), + }, + "temperature": { + "type": "float", + "value": str( + self.model_config_dict.get("temperature") + ), + }, + "top_k": { + "type": "int", + "value": str(self.model_config_dict.get("top_k")), + }, + "top_p": { + "type": "float", + "value": str(self.model_config_dict.get("top_p")), + }, + }, + } + + try: + # Send the request and handle the response + with httpx.Client() as client: + response = client.post( + self._url, # type: ignore[arg-type] + headers=headers, + json=data, + ) + + raw_text = response.text + # Split the string into two dictionaries + dicts = raw_text.split("}\n{") + + # Keep only the last dictionary + last_dict = "{" + dicts[-1] + + # Parse the dictionary + last_dict = json.loads(last_dict) + return self._sambaverse_to_openai_response(last_dict) # type: ignore[arg-type] + + except httpx.HTTPStatusError: + raise RuntimeError(f"HTTP request failed: {raw_text}") diff --git a/camel/models/sglang_model.py b/camel/models/sglang_model.py index d7a2838485..30db603e25 100644 --- a/camel/models/sglang_model.py +++ b/camel/models/sglang_model.py @@ -16,7 +16,7 @@ import time from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.configs import SGLANG_API_PARAMS, SGLangConfig @@ -86,6 +86,12 @@ def __init__( api_key="Set-but-ignored", # required but ignored base_url=self._url, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key="Set-but-ignored", # required but ignored + base_url=self._url, + ) def _start_server(self) -> None: from sglang.utils import ( # type: ignore[import-untyped] @@ -179,6 +185,44 @@ def check_model_config(self): "input into SGLang model backend." 
) + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of OpenAI chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. + """ + + # Ensure server is running + self._ensure_server_running() + + with self._lock: + # Update last run time + self.last_run_time = time.time() + + if self._client is None: + raise RuntimeError( + "Client is not initialized. Ensure the server is running." + ) + + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + + return response + def _run( self, messages: List[OpenAIMessage], diff --git a/camel/models/stub_model.py b/camel/models/stub_model.py index d4f3180ba2..8edd87dce5 100644 --- a/camel/models/stub_model.py +++ b/camel/models/stub_model.py @@ -75,6 +75,43 @@ def token_counter(self) -> BaseTokenCounter: self._token_counter = StubTokenCounter() return self._token_counter + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Run fake inference by returning a fixed string. + All arguments are unused for the dummy model. + + Returns: + Dict[str, Any]: Response in the OpenAI API format. + """ + ARBITRARY_STRING = "Lorem Ipsum" + response: ChatCompletion = ChatCompletion( + id="stub_model_id", + model="stub", + object="chat.completion", + created=int(time.time()), + choices=[ + Choice( + finish_reason="stop", + index=0, + message=ChatCompletionMessage( + content=ARBITRARY_STRING, + role="assistant", + ), + logprobs=None, + ) + ], + usage=CompletionUsage( + completion_tokens=10, + prompt_tokens=10, + total_tokens=20, + ), + ) + return response + def _run( self, messages: List[OpenAIMessage], diff --git a/camel/models/togetherai_model.py b/camel/models/togetherai_model.py index 3d0177b0f0..a32796c046 100644 --- a/camel/models/togetherai_model.py +++ b/camel/models/togetherai_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.configs import TOGETHERAI_API_PARAMS, TogetherAIConfig @@ -83,6 +83,38 @@ def __init__( api_key=self._api_key, base_url=self._url, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key=self._api_key, + base_url=self._url, + ) + + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of OpenAI chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. 
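+
+        Example:
+            Illustrative sketch (assumes a configured Together AI model
+            backend; the request is routed through the OpenAI-compatible
+            ``AsyncOpenAI`` client)::
+
+                response = await model._arun(messages)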
+ """ + # Use OpenAI cilent as interface call Together AI + # Reference: https://docs.together.ai/docs/openai-api-compatibility + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + return response def _run( self, diff --git a/camel/models/vllm_model.py b/camel/models/vllm_model.py index 84d5d2589b..4dd9204200 100644 --- a/camel/models/vllm_model.py +++ b/camel/models/vllm_model.py @@ -15,7 +15,7 @@ import subprocess from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.configs import VLLM_API_PARAMS, VLLMConfig @@ -78,6 +78,12 @@ def __init__( api_key="EMPTY", # required but ignored base_url=self._url, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key="EMPTY", # required but ignored + base_url=self._url, + ) def _start_server(self) -> None: r"""Starts the vllm server in a subprocess.""" @@ -122,6 +128,31 @@ def check_model_config(self): "input into vLLM model backend." ) + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of OpenAI chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. + """ + + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + return response + def _run( self, messages: List[OpenAIMessage], diff --git a/camel/models/yi_model.py b/camel/models/yi_model.py index 1bbd60c70f..5ec6ea84e8 100644 --- a/camel/models/yi_model.py +++ b/camel/models/yi_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.configs import YI_API_PARAMS, YiConfig @@ -81,6 +81,36 @@ def __init__( api_key=self._api_key, base_url=self._url, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key=self._api_key, + base_url=self._url, + ) + + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of Yi chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. 
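+
+        Example:
+            Illustrative sketch (assumes a configured Yi model backend and
+            ``import asyncio``); because the call is asynchronous, several
+            requests can be awaited concurrently::
+
+                responses = await asyncio.gather(
+                    model._arun(messages_a),
+                    model._arun(messages_b),
+                )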
+ """ + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + return response def _run( self, diff --git a/camel/models/zhipuai_model.py b/camel/models/zhipuai_model.py index 9a7e4606d6..e3b4c0b7e5 100644 --- a/camel/models/zhipuai_model.py +++ b/camel/models/zhipuai_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import AsyncOpenAI, OpenAI, Stream from pydantic import BaseModel from camel.configs import ZHIPUAI_API_PARAMS, ZhipuAIConfig @@ -81,6 +81,38 @@ def __init__( api_key=self._api_key, base_url=self._url, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key=self._api_key, + base_url=self._url, + ) + + async def _arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Runs inference of OpenAI chat completion. + + Args: + messages (List[OpenAIMessage]): Message list with the chat history + in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. + """ + # Use OpenAI cilent as interface call ZhipuAI + # Reference: https://open.bigmodel.cn/dev/api#openai_sdk + response = await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **self.model_config_dict, + ) + return response def _run( self, From 2ce8758d7e8f67cb9e2d32217d1c427330f0fdbc Mon Sep 17 00:00:00 2001 From: liuxukun2000 Date: Thu, 9 Jan 2025 14:50:42 -0600 Subject: [PATCH 10/28] add async run in chat agent --- camel/agents/chat_agent.py | 92 ++++++++++++++++++++++++++++++++++- camel/models/model_manager.py | 37 ++++++++++++++ 2 files changed, 128 insertions(+), 1 deletion(-) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 410d80788e..f13fa997c4 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -552,7 +552,7 @@ async def step_async( e.args[1], tool_call_records, "max_tokens_exceeded" ) - response = self._get_model_response( + response = await self._aget_model_response( openai_messages, response_format, num_tokens ) @@ -629,6 +629,43 @@ def _get_model_response( else: return self._handle_stream_response(response, num_tokens) + async def _aget_model_response( + self, + openai_messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]], + num_tokens: int, + ) -> _ModelResponse: + r"""Internal function for agent step model response.""" + + response = None + try: + response = await self.model_backend.arun( + openai_messages, response_format, self._get_full_tool_schemas() + ) + except Exception as exc: + logger.error( + f"An error occurred while running model " + f"{self.model_backend.model_type}, " + f"index: {self.model_backend.current_model_index}", + exc_info=exc, + ) + if not response: + raise ModelProcessingError( + "Unable to process messages: none of the provided models " + "run succesfully." 
+ ) + + logger.info( + f"Model {self.model_backend.model_type}, " + f"index {self.model_backend.current_model_index}, " + f"processed these messages: {openai_messages}" + ) + + if isinstance(response, ChatCompletion): + return self._handle_batch_response(response) + else: + return await self._ahandle_stream_response(response, num_tokens) + def _step_get_info( self, output_messages: List[BaseMessage], @@ -793,6 +830,59 @@ def _safe_model_dump(self, obj) -> dict: else: raise TypeError("The object is not a Pydantic model") + async def _ahandle_stream_response( + self, + response: Stream[ChatCompletionChunk], + prompt_tokens: int, + ) -> _ModelResponse: + r"""Process a stream response from the model and extract the necessary + information. + + Args: + response (dict): Model response. + prompt_tokens (int): Number of input prompt tokens. + + Returns: + _ModelResponse: a parsed model response. + """ + content_dict: defaultdict = defaultdict(lambda: "") + finish_reasons_dict: defaultdict = defaultdict(lambda: "") + output_messages: List[BaseMessage] = [] + response_id: str = "" + # All choices in one response share one role + async for chunk in response: + response_id = chunk.id + for choice in chunk.choices: + index = choice.index + delta = choice.delta + if delta.content is not None: + # When response has not been stopped + # Notice that only the first chunk_dict has the "role" + content_dict[index] += delta.content + if choice.finish_reason: + finish_reasons_dict[index] = choice.finish_reason + chat_message = BaseMessage( + role_name=self.role_name, + role_type=self.role_type, + meta_dict=dict(), + content=content_dict[index], + ) + output_messages.append(chat_message) + finish_reasons = [ + finish_reasons_dict[i] for i in range(len(finish_reasons_dict)) + ] + usage_dict = self.get_usage_dict(output_messages, prompt_tokens) + + # TODO: Handle tool calls + return _ModelResponse( + response=response, + tool_call_request=None, + output_messages=output_messages, + finish_reasons=finish_reasons, + usage_dict=usage_dict, + response_id=response_id, + ) + def _handle_stream_response( self, response: Stream[ChatCompletionChunk], diff --git a/camel/models/model_manager.py b/camel/models/model_manager.py index f30babf2ba..b8d23d7798 100644 --- a/camel/models/model_manager.py +++ b/camel/models/model_manager.py @@ -216,3 +216,40 @@ def run( self.current_model = self.scheduling_strategy() raise exc return response + + async def arun( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + r"""Process a list of messages by selecting a model based on + the scheduling strategy. + Sends the entire list of messages to the selected model, + and returns a single response. + + Args: + messages (List[OpenAIMessage]): Message list with the chat + history in OpenAI API format. + + Returns: + Union[ChatCompletion, Stream[ChatCompletionChunk]]: + `ChatCompletion` in the non-stream mode, or + `Stream[ChatCompletionChunk]` in the stream mode. 
+ """ + self.current_model = self.scheduling_strategy() + + # Pass all messages to the selected model and get the response + try: + response = await self.current_model.arun(messages, response_format, tools) + except Exception as exc: + logger.error(f"Error processing with model: {self.current_model}") + if self.scheduling_strategy == self.always_first: + self.scheduling_strategy = self.round_robin + logger.warning( + "The scheduling strategy has been changed to 'round_robin'" + ) + # Skip already used one + self.current_model = self.scheduling_strategy() + raise exc + return response From 40007c89fc24bb0a6ddc2633258dc41674c65c57 Mon Sep 17 00:00:00 2001 From: liuxukun2000 Date: Thu, 9 Jan 2025 15:48:28 -0600 Subject: [PATCH 11/28] precommit fix --- camel/agents/chat_agent.py | 8 ++++---- camel/models/model_manager.py | 4 +++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index f13fa997c4..f9f24b39a5 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -28,7 +28,7 @@ Union, ) -from openai import Stream +from openai import AsyncStream, Stream from pydantic import BaseModel, ConfigDict from camel.agents.base import BaseAgent @@ -105,7 +105,7 @@ class _ModelResponse(BaseModel): model_config = ConfigDict(arbitrary_types_allowed=True) - response: Union[ChatCompletion, Stream] + response: Union[ChatCompletion, Stream, AsyncStream] tool_call_request: Optional[_ToolCallRequest] output_messages: List[BaseMessage] finish_reasons: List[str] @@ -639,7 +639,7 @@ async def _aget_model_response( response = None try: - response = await self.model_backend.arun( + response = await self.model_backend.arun( # type: ignore openai_messages, response_format, self._get_full_tool_schemas() ) except Exception as exc: @@ -832,7 +832,7 @@ def _safe_model_dump(self, obj) -> dict: async def _ahandle_stream_response( self, - response: Stream[ChatCompletionChunk], + response: AsyncStream[ChatCompletionChunk], prompt_tokens: int, ) -> _ModelResponse: r"""Process a stream response from the model and extract the necessary diff --git a/camel/models/model_manager.py b/camel/models/model_manager.py index b8d23d7798..e96b81a8de 100644 --- a/camel/models/model_manager.py +++ b/camel/models/model_manager.py @@ -241,7 +241,9 @@ async def arun( # Pass all messages to the selected model and get the response try: - response = await self.current_model.arun(messages, response_format, tools) + response = await self.current_model.arun( + messages, response_format, tools + ) except Exception as exc: logger.error(f"Error processing with model: {self.current_model}") if self.scheduling_strategy == self.always_first: From f6e4041080339484de7151f2f5d4fd3897820d60 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Thu, 9 Jan 2025 18:51:15 -0600 Subject: [PATCH 12/28] sort out codes in ChatAgent --- camel/agents/chat_agent.py | 405 ++++++++---------- camel/toolkits/function_tool.py | 4 + examples/models/role_playing_with_cohere.py | 6 +- examples/models/role_playing_with_mistral.py | 6 +- examples/models/role_playing_with_ollama.py | 6 +- .../models/role_playing_with_sambanova.py | 6 +- ...gentops_track_roleplaying_with_function.py | 6 +- .../toolkits/role_playing_with_functions.py | 6 +- test/agents/test_chat_agent.py | 8 +- 9 files changed, 211 insertions(+), 242 deletions(-) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index f9f24b39a5..5123f854f7 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -93,6 
+93,77 @@ def _convert_to_schema( return tool +def _safe_model_dump(obj) -> Dict[str, Any]: + r"""Safely dump a Pydantic model to a dictionary. + + This method attempts to use the `model_dump` method if available, + otherwise it falls back to the `dict` method. + """ + # Check if the `model_dump` method exists (Pydantic v2) + if hasattr(obj, "model_dump"): + return obj.model_dump() + # Fallback to `dict()` method (Pydantic v1) + elif hasattr(obj, "dict"): + return obj.dict() + else: + raise TypeError("The object is not a Pydantic model") + + +def _get_info_dict( + session_id: Optional[str], + usage: Optional[Dict[str, int]], + termination_reasons: List[str], + num_tokens: int, + tool_calls: List[ToolCallingRecord], +) -> Dict[str, Any]: + r"""Returns a dictionary containing information about the chat session. + + Args: + session_id (str, optional): The ID of the chat session. + usage (Dict[str, int], optional): Information about the usage of + the LLM. + termination_reasons (List[str]): The reasons for the termination + of the chat session. + num_tokens (int): The number of tokens used in the chat session. + tool_calls (List[FunctionCallingRecord]): The list of function + calling records, containing the information of called tools. + + Returns: + Dict[str, Any]: The chat session information. + """ + return { + "id": session_id, + "usage": usage, + "termination_reasons": termination_reasons, + "num_tokens": num_tokens, + "tool_calls": tool_calls, + } + + +def _handle_logprobs(choice: Choice) -> Optional[List[Dict[str, Any]]]: + # Process log probabilities, append to the message meta information + if choice.logprobs is None: + return None + + tokens_logprobs = choice.logprobs.content + + if tokens_logprobs is None: + return None + + # Extract and structure logprob information + return [ + { + "token": token_logprob.token, + "logprob": token_logprob.logprob, + "top_logprobs": [ + (top_logprob.token, top_logprob.logprob) + for top_logprob in token_logprob.top_logprobs + ], + } + for token_logprob in tokens_logprobs + ] + + class _ToolCallRequest(BaseModel): r"""The request for tool calling.""" @@ -113,7 +184,7 @@ class _ModelResponse(BaseModel): response_id: str -class FunctionCallingRecord(BaseModel): +class ToolCallingRecord(BaseModel): r"""Historical records of functions called in the conversation. Attributes: @@ -139,7 +210,7 @@ def __str__(self) -> str: f"\tResult: {self.result}" ) - def as_dict(self) -> dict[str, Any]: + def as_dict(self) -> Dict[str, Any]: r"""Returns the function calling record as a dictionary. Returns: @@ -368,37 +439,6 @@ def _update_system_message_for_output_language(self) -> None: content=language_prompt, ) - def _get_info_dict( - self, - session_id: Optional[str], - usage: Optional[Dict[str, int]], - termination_reasons: List[str], - num_tokens: int, - tool_calls: List[FunctionCallingRecord], - ) -> Dict[str, Any]: - r"""Returns a dictionary containing information about the chat session. - - Args: - session_id (str, optional): The ID of the chat session. - usage (Dict[str, int], optional): Information about the usage of - the LLM. - termination_reasons (List[str]): The reasons for the termination - of the chat session. - num_tokens (int): The number of tokens used in the chat session. - tool_calls (List[FunctionCallingRecord]): The list of function - calling records, containing the information of called tools. - - Returns: - Dict[str, Any]: The chat session information. 
- """ - return { - "id": session_id, - "usage": usage, - "termination_reasons": termination_reasons, - "num_tokens": num_tokens, - "tool_calls": tool_calls, - } - def init_messages(self) -> None: r"""Initializes the stored messages list with the current system message. @@ -441,18 +481,15 @@ def step( """ # Convert input message to BaseMessage if necessary - input_message = ( - BaseMessage.make_user_message( + if isinstance(input_message, str): + input_message = BaseMessage.make_user_message( role_name="User", content=input_message ) - if isinstance(input_message, str) - else input_message - ) # Add user input to memory self.update_memory(input_message, OpenAIBackendRole.USER) - tool_call_records: List[FunctionCallingRecord] = [] + tool_call_records: List[ToolCallingRecord] = [] while True: try: @@ -461,8 +498,7 @@ def step( return self._step_token_exceed( e.args[1], tool_call_records, "max_tokens_exceeded" ) - - # Process model response + # Get response from model backend response = self._get_model_response( openai_messages, response_format, num_tokens ) @@ -471,46 +507,20 @@ def step( break # TODO: return with external tools - - # Handle tool requests - if response.tool_call_request: - tool_call_records.append( - self._execute_tool(response.tool_call_request) - ) + # If tool call requested, execute it and enter the next iteration + if tool_call_request := response.tool_call_request: + tool_call_records.append(self._execute_tool(tool_call_request)) continue break - # Final info and response - info = self._step_get_info( - response.output_messages, - response.finish_reasons, - response.usage_dict, - response.response_id, - tool_call_records, - num_tokens, - ) - self._log_final_output(response.output_messages) - return ChatAgentResponse( - msgs=response.output_messages, - terminated=self.terminated, - info=info, + return self._parse_chatagent_response( + response, tool_call_records, num_tokens ) - def _log_final_output(self, output_messages: List[BaseMessage]) -> None: - r"""Log final messages or warnings about multiple responses.""" - if len(output_messages) == 1: - self.record_message(output_messages[0]) - else: - logger.warning( - "Multiple messages returned in `step()`. Record " - "selected message manually using `record_message()`." 
- ) - - # TODO: Redesign this method - async def step_async( + async def astep( self, input_message: Union[BaseMessage, str], response_format: Optional[Type[BaseModel]] = None, @@ -543,7 +553,7 @@ async def step_async( self.update_memory(input_message, OpenAIBackendRole.USER) - tool_call_records: List[FunctionCallingRecord] = [] + tool_call_records: List[ToolCallingRecord] = [] while True: try: openai_messages, num_tokens = self.memory.get_context() @@ -560,13 +570,25 @@ async def step_async( break if tool_call_request := response.tool_call_request: - tool_call_records.append( - await self._execute_tool_async(tool_call_request) - ) + tool_call_record = await self._aexecute_tool(tool_call_request) + tool_call_records.append(tool_call_record) continue break + self._log_final_output(response.output_messages) + + return self._parse_chatagent_response( + response, tool_call_records, num_tokens + ) + + def _parse_chatagent_response( + self, + response: _ModelResponse, + tool_call_records: List[ToolCallingRecord], + num_tokens: int, + ) -> ChatAgentResponse: + r"""Parse the final model response into the chat agent response.""" info = self._step_get_info( response.output_messages, response.finish_reasons, @@ -576,22 +598,22 @@ async def step_async( num_tokens, ) - if len(response.output_messages) == 1: - # Auto record if the output result is a single message - self.record_message(response.output_messages[0]) - else: - logger.warning( - "Multiple messages returned in `step()`, message won't be " - "recorded automatically. Please call `record_message()` to " - "record the selected message manually." - ) - return ChatAgentResponse( msgs=response.output_messages, terminated=self.terminated, info=info, ) + def _log_final_output(self, output_messages: List[BaseMessage]) -> None: + r"""Log final messages or warnings about multiple responses.""" + if len(output_messages) == 1: + self.record_message(output_messages[0]) + else: + logger.warning( + "Multiple messages returned in `step()`. Record " + "selected message manually using `record_message()`." 
+ ) + def _get_model_response( self, openai_messages: List[OpenAIMessage], @@ -639,7 +661,7 @@ async def _aget_model_response( response = None try: - response = await self.model_backend.arun( # type: ignore + response = await self.model_backend.arun( openai_messages, response_format, self._get_full_tool_schemas() ) except Exception as exc: @@ -672,7 +694,7 @@ def _step_get_info( finish_reasons: List[str], usage_dict: Dict[str, int], response_id: str, - tool_calls: List[FunctionCallingRecord], + tool_calls: List[ToolCallingRecord], num_tokens: int, ) -> Dict[str, Any]: r"""Process the output of a chat step and gather information about the @@ -722,7 +744,7 @@ def _step_get_info( if self.terminated and termination_reason is not None: finish_reasons = [termination_reason] * len(finish_reasons) - return self._get_info_dict( + return _get_info_dict( response_id, usage_dict, finish_reasons, @@ -745,7 +767,7 @@ def _handle_batch_response( output_messages: List[BaseMessage] = [] for choice in response.choices: meta_dict = {} - if logprobs_info := self._handle_logprobs(choice): + if logprobs_info := _handle_logprobs(choice): meta_dict["logprobs_info"] = logprobs_info chat_message = BaseMessage( @@ -761,13 +783,12 @@ def _handle_batch_response( finish_reasons = [ str(choice.finish_reason) for choice in response.choices ] - usage = ( - self._safe_model_dump(response.usage) - if response.usage is not None - else {} - ) - tool_call_request = None + usage = {} + if response.usage is not None: + usage = _safe_model_dump(response.usage) + + tool_call_request: Optional[_ToolCallRequest] = None if tool_calls := response.choices[0].message.tool_calls: func_name = tool_calls[0].function.name args = json.loads(tool_calls[0].function.arguments) @@ -784,55 +805,9 @@ def _handle_batch_response( response_id=response.id, ) - def _handle_logprobs( - self, choice: Choice - ) -> Optional[List[Dict[str, Any]]]: - # Process log probabilities, append to the message meta information - if choice.logprobs is None: - return None - - tokens_logprobs = choice.logprobs.content - - if tokens_logprobs is None: - return None - - # Extract and structure logprob information - return [ - { - "token": token_logprob.token, - "logprob": token_logprob.logprob, - "top_logprobs": [ - (top_logprob.token, top_logprob.logprob) - for top_logprob in token_logprob.top_logprobs - ], - } - for token_logprob in tokens_logprobs - ] - - def _safe_model_dump(self, obj) -> dict: - r"""Safely dump a Pydantic model to a dictionary. - - This method attempts to use the `model_dump` method if available, - otherwise it falls back to the `dict` method. - - Args: - obj: The Pydantic model instance to be dumped. - - Returns: - dict: A dictionary representation of the Pydantic model. 
- """ - # Check if the `model_dump` method exists (Pydantic v2) - if hasattr(obj, "model_dump"): - return obj.model_dump() - # Fallback to `dict()` method (Pydantic v1) - elif hasattr(obj, "dict"): - return obj.dict() - else: - raise TypeError("The object is not a Pydantic model") - - async def _ahandle_stream_response( + def _handle_stream_response( self, - response: AsyncStream[ChatCompletionChunk], + response: Stream[ChatCompletionChunk], prompt_tokens: int, ) -> _ModelResponse: r"""Process a stream response from the model and extract the necessary @@ -850,24 +825,11 @@ async def _ahandle_stream_response( output_messages: List[BaseMessage] = [] response_id: str = "" # All choices in one response share one role - async for chunk in response: + for chunk in response: response_id = chunk.id - for choice in chunk.choices: - index = choice.index - delta = choice.delta - if delta.content is not None: - # When response has not been stopped - # Notice that only the first chunk_dict has the "role" - content_dict[index] += delta.content - if choice.finish_reason: - finish_reasons_dict[index] = choice.finish_reason - chat_message = BaseMessage( - role_name=self.role_name, - role_type=self.role_type, - meta_dict=dict(), - content=content_dict[index], - ) - output_messages.append(chat_message) + self._handle_chunk( + chunk, content_dict, finish_reasons_dict, output_messages + ) finish_reasons = [ finish_reasons_dict[i] for i in range(len(finish_reasons_dict)) ] @@ -883,9 +845,9 @@ async def _ahandle_stream_response( response_id=response_id, ) - def _handle_stream_response( + async def _ahandle_stream_response( self, - response: Stream[ChatCompletionChunk], + response: AsyncStream[ChatCompletionChunk], prompt_tokens: int, ) -> _ModelResponse: r"""Process a stream response from the model and extract the necessary @@ -903,24 +865,11 @@ def _handle_stream_response( output_messages: List[BaseMessage] = [] response_id: str = "" # All choices in one response share one role - for chunk in response: + async for chunk in response: response_id = chunk.id - for choice in chunk.choices: - index = choice.index - delta = choice.delta - if delta.content is not None: - # When response has not been stopped - # Notice that only the first chunk_dict has the "role" - content_dict[index] += delta.content - if choice.finish_reason: - finish_reasons_dict[index] = choice.finish_reason - chat_message = BaseMessage( - role_name=self.role_name, - role_type=self.role_type, - meta_dict=dict(), - content=content_dict[index], - ) - output_messages.append(chat_message) + self._handle_chunk( + chunk, content_dict, finish_reasons_dict, output_messages + ) finish_reasons = [ finish_reasons_dict[i] for i in range(len(finish_reasons_dict)) ] @@ -936,10 +885,40 @@ def _handle_stream_response( response_id=response_id, ) + def _handle_choice_chunk( + self, + chunk: ChatCompletionChunk, + content_dict: defaultdict, + finish_reasons_dict: defaultdict, + output_messages: List[BaseMessage], + ) -> Optional[BaseMessage]: + r"""Handle a chunk of the model response. + + Returns: + Optional[BaseMessage]: The message if the response is finished. 
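+
+        Note:
+            Content deltas are accumulated per choice index in
+            ``content_dict``; once a ``finish_reason`` arrives for a
+            choice, the accumulated text is wrapped in a ``BaseMessage``
+            and appended to ``output_messages``.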
+ """ + for choice in chunk.choices: + index = choice.index + delta = choice.delta + if delta.content is not None: + content_dict[index] += delta.content + + if not choice.finish_reason: + continue + + finish_reasons_dict[index] = choice.finish_reason + chat_message = BaseMessage( + role_name=self.role_name, + role_type=self.role_type, + meta_dict=dict(), + content=content_dict[index], + ) + output_messages.append(chat_message) + def _step_token_exceed( self, num_tokens: int, - tool_calls: List[FunctionCallingRecord], + tool_calls: List[ToolCallingRecord], termination_reason: str, ) -> ChatAgentResponse: r"""Return trivial response containing number of tokens and information @@ -957,7 +936,7 @@ def _step_token_exceed( """ self.terminated = True - info = self._get_info_dict( + info = _get_info_dict( None, None, [termination_reason], @@ -974,7 +953,7 @@ def _step_token_exceed( def _execute_tool( self, tool_call_request: _ToolCallRequest, - ) -> FunctionCallingRecord: + ) -> ToolCallingRecord: r"""Execute the tool with arguments following the model's response. Args: @@ -990,37 +969,12 @@ def _execute_tool( tool = self._internal_tools[func_name] result = tool(**args) - assist_msg = FunctionCallingMessage( - role_name=self.role_name, - role_type=self.role_type, - meta_dict=None, - content="", - func_name=func_name, - args=args, - ) - func_msg = FunctionCallingMessage( - role_name=self.role_name, - role_type=self.role_type, - meta_dict=None, - content="", - func_name=func_name, - result=result, - ) + return self._record_tool_calling(func_name, args, result) - self.update_memory(assist_msg, OpenAIBackendRole.ASSISTANT) - self.update_memory(func_msg, OpenAIBackendRole.FUNCTION) - - # Record information about this function call - func_record = FunctionCallingRecord( - func_name=func_name, args=args, result=result - ) - - return func_record - - async def _execute_tool_async( + async def _aexecute_tool( self, tool_call_request: _ToolCallRequest, - ) -> FunctionCallingRecord: + ) -> ToolCallingRecord: r"""Execute the async tool with arguments following the model's response. @@ -1034,8 +988,19 @@ async def _execute_tool_async( func_name = tool_call_request.func_name args = tool_call_request.args tool = self._internal_tools[func_name] - result = await tool(**args) + if tool.is_async: + result = await tool(**args) + else: + result = tool(**args) + + return self._record_tool_calling(func_name, args, result) + def _record_tool_calling( + self, func_name: str, args: Dict[str, Any], result: Any + ): + r"""Record the tool calling information in the memory, and return the + tool calling record. + """ assist_msg = FunctionCallingMessage( role_name=self.role_name, role_type=self.role_type, @@ -1057,11 +1022,11 @@ async def _execute_tool_async( self.update_memory(func_msg, OpenAIBackendRole.FUNCTION) # Record information about this function call - func_record = FunctionCallingRecord( + tool_record = ToolCallingRecord( func_name=func_name, args=args, result=result ) - return func_record + return tool_record def get_usage_dict( self, output_messages: List[BaseMessage], prompt_tokens: int @@ -1076,15 +1041,15 @@ def get_usage_dict( dict: Usage dictionary. 
""" encoding = get_model_encoding(self.model_type.value_for_tiktoken) - completion_tokens = 0 - for message in output_messages: - completion_tokens += len(encoding.encode(message.content)) - usage_dict = dict( + completion_tokens = sum( + len(encoding.encode(message.content)) + for message in output_messages + ) + return dict( completion_tokens=completion_tokens, prompt_tokens=prompt_tokens, total_tokens=completion_tokens + prompt_tokens, ) - return usage_dict def add_model_scheduling_strategy(self, name: str, strategy_fn: Callable): r"""Add a scheduling strategy method provided by user to ModelManger. diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index 6a081e7926..d7e92c4020 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -398,6 +398,10 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any: f"Error: {e}" ) + @property + def is_async(self) -> bool: + return inspect.iscoroutinefunction(self.func) + @staticmethod def validate_openai_tool_schema( openai_tool_schema: Dict[str, Any], diff --git a/examples/models/role_playing_with_cohere.py b/examples/models/role_playing_with_cohere.py index ff93313d00..89770e94f3 100644 --- a/examples/models/role_playing_with_cohere.py +++ b/examples/models/role_playing_with_cohere.py @@ -15,7 +15,7 @@ from colorama import Fore -from camel.agents.chat_agent import FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.configs import CohereConfig from camel.models import ModelFactory from camel.societies import RolePlaying @@ -120,8 +120,8 @@ def main( # Print output from the assistant, including any function # execution information print_text_animated(Fore.GREEN + "AI Assistant:") - tool_calls: List[FunctionCallingRecord] = [ - FunctionCallingRecord(**call.as_dict()) + tool_calls: List[ToolCallingRecord] = [ + ToolCallingRecord(**call.as_dict()) for call in assistant_response.info['tool_calls'] ] for func_record in tool_calls: diff --git a/examples/models/role_playing_with_mistral.py b/examples/models/role_playing_with_mistral.py index e92ade930c..5ef2217d67 100644 --- a/examples/models/role_playing_with_mistral.py +++ b/examples/models/role_playing_with_mistral.py @@ -16,7 +16,7 @@ from colorama import Fore -from camel.agents.chat_agent import FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.configs import MistralConfig from camel.models import ModelFactory from camel.societies import RolePlaying @@ -120,8 +120,8 @@ def main( # Print output from the assistant, including any function # execution information print_text_animated(Fore.GREEN + "AI Assistant:") - tool_calls: List[FunctionCallingRecord] = [ - FunctionCallingRecord(**call.as_dict()) + tool_calls: List[ToolCallingRecord] = [ + ToolCallingRecord(**call.as_dict()) for call in assistant_response.info['tool_calls'] ] for func_record in tool_calls: diff --git a/examples/models/role_playing_with_ollama.py b/examples/models/role_playing_with_ollama.py index 77eaeec33e..fe58c5a44b 100644 --- a/examples/models/role_playing_with_ollama.py +++ b/examples/models/role_playing_with_ollama.py @@ -16,7 +16,7 @@ from colorama import Fore -from camel.agents.chat_agent import FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.models import ModelFactory from camel.societies import RolePlaying from camel.types import ModelPlatformType @@ -100,8 +100,8 @@ def main( # Print output from the assistant, including any function # execution information 
print_text_animated(Fore.GREEN + "AI Assistant:") - tool_calls: List[FunctionCallingRecord] = [ - FunctionCallingRecord(**call.as_dict()) + tool_calls: List[ToolCallingRecord] = [ + ToolCallingRecord(**call.as_dict()) for call in assistant_response.info['tool_calls'] ] for func_record in tool_calls: diff --git a/examples/models/role_playing_with_sambanova.py b/examples/models/role_playing_with_sambanova.py index 2cbba7342d..9b8834fe24 100644 --- a/examples/models/role_playing_with_sambanova.py +++ b/examples/models/role_playing_with_sambanova.py @@ -17,7 +17,7 @@ import agentops from colorama import Fore -from camel.agents.chat_agent import FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.configs import SambaCloudAPIConfig from camel.models import ModelFactory from camel.societies import RolePlaying @@ -128,8 +128,8 @@ def main( # Print output from the assistant, including any function # execution information print_text_animated(Fore.GREEN + "AI Assistant:") - tool_calls: List[FunctionCallingRecord] = [ - FunctionCallingRecord(**call.as_dict()) + tool_calls: List[ToolCallingRecord] = [ + ToolCallingRecord(**call.as_dict()) for call in assistant_response.info['tool_calls'] ] for func_record in tool_calls: diff --git a/examples/observability/agentops_track_roleplaying_with_function.py b/examples/observability/agentops_track_roleplaying_with_function.py index 800ddfcf3f..4570ae4f6e 100644 --- a/examples/observability/agentops_track_roleplaying_with_function.py +++ b/examples/observability/agentops_track_roleplaying_with_function.py @@ -17,7 +17,7 @@ import agentops from colorama import Fore -from camel.agents.chat_agent import FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.configs import ChatGPTConfig from camel.models import ModelFactory from camel.societies import RolePlaying @@ -125,8 +125,8 @@ # Print output from the assistant, including any function # execution information print_text_animated(Fore.GREEN + "AI Assistant:") - tool_calls: List[FunctionCallingRecord] = [ - FunctionCallingRecord(**call.as_dict()) + tool_calls: List[ToolCallingRecord] = [ + ToolCallingRecord(**call.as_dict()) for call in assistant_response.info['tool_calls'] ] for func_record in tool_calls: diff --git a/examples/toolkits/role_playing_with_functions.py b/examples/toolkits/role_playing_with_functions.py index b516523321..157ea4bf17 100644 --- a/examples/toolkits/role_playing_with_functions.py +++ b/examples/toolkits/role_playing_with_functions.py @@ -16,7 +16,7 @@ from colorama import Fore -from camel.agents.chat_agent import FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.configs import ChatGPTConfig from camel.models import ModelFactory from camel.societies import RolePlaying @@ -122,8 +122,8 @@ def main( # Print output from the assistant, including any function # execution information print_text_animated(Fore.GREEN + "AI Assistant:") - tool_calls: List[FunctionCallingRecord] = [ - FunctionCallingRecord(**call.as_dict()) + tool_calls: List[ToolCallingRecord] = [ + ToolCallingRecord(**call.as_dict()) for call in assistant_response.info['tool_calls'] ] for func_record in tool_calls: diff --git a/test/agents/test_chat_agent.py b/test/agents/test_chat_agent.py index cad8736f9d..491b1b125f 100644 --- a/test/agents/test_chat_agent.py +++ b/test/agents/test_chat_agent.py @@ -30,7 +30,7 @@ from pydantic import BaseModel, Field from camel.agents import ChatAgent -from camel.agents.chat_agent import 
FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.configs import ChatGPTConfig from camel.generators import SystemMessageGenerator from camel.memories import MemoryRecord @@ -857,7 +857,7 @@ def test_tool_calling_sync(step_call_count=3): for i in range(step_call_count): agent_response = agent.step(user_msg) - tool_calls: List[FunctionCallingRecord] = [ + tool_calls: List[ToolCallingRecord] = [ call for call in agent_response.info['tool_calls'] ] @@ -980,7 +980,7 @@ async def test_tool_calling_math_async(step_call_count=3): ) for i in range(step_call_count): - agent_response = await agent.step_async(user_msg) + agent_response = await agent.astep(user_msg) tool_calls = agent_response.info['tool_calls'] @@ -1068,7 +1068,7 @@ def mock_run_tool_calling_async(*args, **kwargs): ) for i in range(step_call_count): - agent_response = await agent.step_async(user_msg) + agent_response = await agent.astep(user_msg) tool_calls = agent_response.info['tool_calls'] From b9eb5be214d049163cbadc9b71f022b8bbd47b19 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Thu, 9 Jan 2025 19:13:41 -0600 Subject: [PATCH 13/28] extract functions and types --- camel/agents/_types.py | 26 +++ camel/agents/{utils.py => _utils.py} | 94 +++++++++- camel/agents/chat_agent.py | 211 ++++------------------ camel/types/agents/__init__.py | 3 + camel/types/agents/tool_calling_record.py | 38 ++++ 5 files changed, 195 insertions(+), 177 deletions(-) create mode 100644 camel/agents/_types.py rename camel/agents/{utils.py => _utils.py} (61%) create mode 100644 camel/types/agents/__init__.py create mode 100644 camel/types/agents/tool_calling_record.py diff --git a/camel/agents/_types.py b/camel/agents/_types.py new file mode 100644 index 0000000000..07e0033feb --- /dev/null +++ b/camel/agents/_types.py @@ -0,0 +1,26 @@ +from typing import Any, Dict, List, Optional, Union + +from pydantic import BaseModel, ConfigDict + +from camel.messages import BaseMessage +from camel.types import AsyncStream, ChatCompletion, Stream + + +class ToolCallRequest(BaseModel): + r"""The request for tool calling.""" + + func_name: str + args: Dict[str, Any] + + +class ModelResponse(BaseModel): + r"""The response from the model.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + response: Union[ChatCompletion, Stream, AsyncStream] + tool_call_request: Optional[ToolCallRequest] + output_messages: List[BaseMessage] + finish_reasons: List[str] + usage_dict: Dict[str, Any] + response_id: str diff --git a/camel/agents/utils.py b/camel/agents/_utils.py similarity index 61% rename from camel/agents/utils.py rename to camel/agents/_utils.py index 50d149399a..fba08be031 100644 --- a/camel/agents/utils.py +++ b/camel/agents/_utils.py @@ -16,13 +16,17 @@ import re import textwrap import uuid -from typing import Any, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional, Union from openai.types.chat.chat_completion_message_tool_call import ( ChatCompletionMessageToolCall, Function, ) +from camel.toolkits import FunctionTool +from camel.types import Choice +from camel.types.agents import ToolCallingRecord + logger = logging.getLogger(__name__) @@ -136,3 +140,91 @@ def extract_tool_call( # No tool call found return None + + +def safe_model_dump(obj) -> Dict[str, Any]: + r"""Safely dump a Pydantic model to a dictionary. + + This method attempts to use the `model_dump` method if available, + otherwise it falls back to the `dict` method. 
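+
+    Example (illustrative; any Pydantic v1 or v2 model works, e.g. the
+    ``usage`` object of a ``ChatCompletion``)::
+
+        usage_dict = safe_model_dump(response.usage)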
+ """ + # Check if the `model_dump` method exists (Pydantic v2) + if hasattr(obj, "model_dump"): + return obj.model_dump() + # Fallback to `dict()` method (Pydantic v1) + elif hasattr(obj, "dict"): + return obj.dict() + else: + raise TypeError("The object is not a Pydantic model") + + +def convert_to_function_tool( + tool: Union[FunctionTool, Callable], +) -> FunctionTool: + r"""Convert a tool to a FunctionTool from Callable.""" + return tool if isinstance(tool, FunctionTool) else FunctionTool(tool) + + +def convert_to_schema( + tool: Union[FunctionTool, Callable, Dict[str, Any]], +) -> Dict[str, Any]: + r"""Convert a tool to a schema from Callable or FunctionTool.""" + if isinstance(tool, FunctionTool): + return tool.get_openai_tool_schema() + elif callable(tool): + return FunctionTool(tool).get_openai_tool_schema() + else: + return tool + + +def get_info_dict( + session_id: Optional[str], + usage: Optional[Dict[str, int]], + termination_reasons: List[str], + num_tokens: int, + tool_calls: List[ToolCallingRecord], +) -> Dict[str, Any]: + r"""Returns a dictionary containing information about the chat session. + + Args: + session_id (str, optional): The ID of the chat session. + usage (Dict[str, int], optional): Information about the usage of + the LLM. + termination_reasons (List[str]): The reasons for the termination + of the chat session. + num_tokens (int): The number of tokens used in the chat session. + tool_calls (List[FunctionCallingRecord]): The list of function + calling records, containing the information of called tools. + + Returns: + Dict[str, Any]: The chat session information. + """ + return { + "id": session_id, + "usage": usage, + "termination_reasons": termination_reasons, + "num_tokens": num_tokens, + "tool_calls": tool_calls, + } + + +def handle_logprobs(choice: Choice) -> Optional[List[Dict[str, Any]]]: + if choice.logprobs is None: + return None + + tokens_logprobs = choice.logprobs.content + + if tokens_logprobs is None: + return None + + return [ + { + "token": token_logprob.token, + "logprob": token_logprob.logprob, + "top_logprobs": [ + (top_logprob.token, top_logprob.logprob) + for top_logprob in token_logprob.top_logprobs + ], + } + for token_logprob in tokens_logprobs + ] diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 5123f854f7..7d1884792b 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -29,8 +29,16 @@ ) from openai import AsyncStream, Stream -from pydantic import BaseModel, ConfigDict - +from pydantic import BaseModel + +from camel.agents._types import ModelResponse, ToolCallRequest +from camel.agents._utils import ( + convert_to_function_tool, + convert_to_schema, + get_info_dict, + handle_logprobs, + safe_model_dump, +) from camel.agents.base import BaseAgent from camel.memories import ( AgentMemory, @@ -50,12 +58,12 @@ from camel.types import ( ChatCompletion, ChatCompletionChunk, - Choice, ModelPlatformType, ModelType, OpenAIBackendRole, RoleType, ) +from camel.types.agents import ToolCallingRecord from camel.utils import get_model_encoding if TYPE_CHECKING: @@ -76,149 +84,6 @@ from camel.utils import track_agent -def _convert_to_function_tool( - tool: Union[FunctionTool, Callable], -) -> FunctionTool: - return tool if isinstance(tool, FunctionTool) else FunctionTool(tool) - - -def _convert_to_schema( - tool: Union[FunctionTool, Callable, Dict[str, Any]], -) -> Dict[str, Any]: - if isinstance(tool, FunctionTool): - return tool.get_openai_tool_schema() - elif callable(tool): - return 
FunctionTool(tool).get_openai_tool_schema() - else: - return tool - - -def _safe_model_dump(obj) -> Dict[str, Any]: - r"""Safely dump a Pydantic model to a dictionary. - - This method attempts to use the `model_dump` method if available, - otherwise it falls back to the `dict` method. - """ - # Check if the `model_dump` method exists (Pydantic v2) - if hasattr(obj, "model_dump"): - return obj.model_dump() - # Fallback to `dict()` method (Pydantic v1) - elif hasattr(obj, "dict"): - return obj.dict() - else: - raise TypeError("The object is not a Pydantic model") - - -def _get_info_dict( - session_id: Optional[str], - usage: Optional[Dict[str, int]], - termination_reasons: List[str], - num_tokens: int, - tool_calls: List[ToolCallingRecord], -) -> Dict[str, Any]: - r"""Returns a dictionary containing information about the chat session. - - Args: - session_id (str, optional): The ID of the chat session. - usage (Dict[str, int], optional): Information about the usage of - the LLM. - termination_reasons (List[str]): The reasons for the termination - of the chat session. - num_tokens (int): The number of tokens used in the chat session. - tool_calls (List[FunctionCallingRecord]): The list of function - calling records, containing the information of called tools. - - Returns: - Dict[str, Any]: The chat session information. - """ - return { - "id": session_id, - "usage": usage, - "termination_reasons": termination_reasons, - "num_tokens": num_tokens, - "tool_calls": tool_calls, - } - - -def _handle_logprobs(choice: Choice) -> Optional[List[Dict[str, Any]]]: - # Process log probabilities, append to the message meta information - if choice.logprobs is None: - return None - - tokens_logprobs = choice.logprobs.content - - if tokens_logprobs is None: - return None - - # Extract and structure logprob information - return [ - { - "token": token_logprob.token, - "logprob": token_logprob.logprob, - "top_logprobs": [ - (top_logprob.token, top_logprob.logprob) - for top_logprob in token_logprob.top_logprobs - ], - } - for token_logprob in tokens_logprobs - ] - - -class _ToolCallRequest(BaseModel): - r"""The request for tool calling.""" - - func_name: str - args: Dict[str, Any] - - -class _ModelResponse(BaseModel): - r"""The response from the model.""" - - model_config = ConfigDict(arbitrary_types_allowed=True) - - response: Union[ChatCompletion, Stream, AsyncStream] - tool_call_request: Optional[_ToolCallRequest] - output_messages: List[BaseMessage] - finish_reasons: List[str] - usage_dict: Dict[str, Any] - response_id: str - - -class ToolCallingRecord(BaseModel): - r"""Historical records of functions called in the conversation. - - Attributes: - func_name (str): The name of the function being called. - args (Dict[str, Any]): The dictionary of arguments passed to - the function. - result (Any): The execution result of calling this function. - """ - - func_name: str - args: Dict[str, Any] - result: Any - - def __str__(self) -> str: - r"""Overridden version of the string function. - - Returns: - str: Modified string to represent the function calling. - """ - return ( - f"Function Execution: {self.func_name}\n" - f"\tArgs: {self.args}\n" - f"\tResult: {self.result}" - ) - - def as_dict(self) -> Dict[str, Any]: - r"""Returns the function calling record as a dictionary. - - Returns: - dict[str, Any]: The function calling record as a dictionary. - """ - return self.model_dump() - - @track_agent(name="ChatAgent") class ChatAgent(BaseAgent): r"""Class for managing conversations of CAMEL Chat Agents. 
@@ -319,14 +184,14 @@ def __init__( self._internal_tools = { tool.get_function_name(): tool for tool in [ - _convert_to_function_tool(tool) for tool in (tools or []) + convert_to_function_tool(tool) for tool in (tools or []) ] } self._external_tool_schemas = { tool_schema["name"]: tool_schema for tool_schema in [ - _convert_to_schema(tool) for tool in (external_tools or []) + convert_to_schema(tool) for tool in (external_tools or []) ] } @@ -365,13 +230,13 @@ def _get_external_tool_names(self) -> Set[str]: def add_tool(self, tool: Union[FunctionTool, Callable]) -> None: r"""Add a tool to the agent.""" - new_tool = _convert_to_function_tool(tool) + new_tool = convert_to_function_tool(tool) self._internal_tools[new_tool.get_function_name()] = new_tool def add_external_tool( self, tool: Union[FunctionTool, Callable, Dict[str, Any]] ) -> None: - new_tool_schema = _convert_to_schema(tool) + new_tool_schema = convert_to_schema(tool) self._external_tool_schemas[new_tool_schema["name"]] = new_tool_schema def remove_tool(self, tool_name: str) -> bool: @@ -584,7 +449,7 @@ async def astep( def _parse_chatagent_response( self, - response: _ModelResponse, + response: ModelResponse, tool_call_records: List[ToolCallingRecord], num_tokens: int, ) -> ChatAgentResponse: @@ -619,7 +484,7 @@ def _get_model_response( openai_messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]], num_tokens: int, - ) -> _ModelResponse: + ) -> ModelResponse: r"""Internal function for agent step model response.""" response = None @@ -656,7 +521,7 @@ async def _aget_model_response( openai_messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]], num_tokens: int, - ) -> _ModelResponse: + ) -> ModelResponse: r"""Internal function for agent step model response.""" response = None @@ -744,7 +609,7 @@ def _step_get_info( if self.terminated and termination_reason is not None: finish_reasons = [termination_reason] * len(finish_reasons) - return _get_info_dict( + return get_info_dict( response_id, usage_dict, finish_reasons, @@ -754,7 +619,7 @@ def _step_get_info( def _handle_batch_response( self, response: ChatCompletion - ) -> _ModelResponse: + ) -> ModelResponse: r"""Process a batch response from the model and extract the necessary information. @@ -767,7 +632,7 @@ def _handle_batch_response( output_messages: List[BaseMessage] = [] for choice in response.choices: meta_dict = {} - if logprobs_info := _handle_logprobs(choice): + if logprobs_info := handle_logprobs(choice): meta_dict["logprobs_info"] = logprobs_info chat_message = BaseMessage( @@ -786,17 +651,15 @@ def _handle_batch_response( usage = {} if response.usage is not None: - usage = _safe_model_dump(response.usage) + usage = safe_model_dump(response.usage) - tool_call_request: Optional[_ToolCallRequest] = None + tool_call_request: Optional[ToolCallRequest] = None if tool_calls := response.choices[0].message.tool_calls: func_name = tool_calls[0].function.name args = json.loads(tool_calls[0].function.arguments) - tool_call_request = _ToolCallRequest( - func_name=func_name, args=args - ) + tool_call_request = ToolCallRequest(func_name=func_name, args=args) - return _ModelResponse( + return ModelResponse( response=response, tool_call_request=tool_call_request, output_messages=output_messages, @@ -809,7 +672,7 @@ def _handle_stream_response( self, response: Stream[ChatCompletionChunk], prompt_tokens: int, - ) -> _ModelResponse: + ) -> ModelResponse: r"""Process a stream response from the model and extract the necessary information. 
@@ -836,7 +699,7 @@ def _handle_stream_response( usage_dict = self.get_usage_dict(output_messages, prompt_tokens) # TODO: Handle tool calls - return _ModelResponse( + return ModelResponse( response=response, tool_call_request=None, output_messages=output_messages, @@ -849,7 +712,7 @@ async def _ahandle_stream_response( self, response: AsyncStream[ChatCompletionChunk], prompt_tokens: int, - ) -> _ModelResponse: + ) -> ModelResponse: r"""Process a stream response from the model and extract the necessary information. @@ -876,7 +739,7 @@ async def _ahandle_stream_response( usage_dict = self.get_usage_dict(output_messages, prompt_tokens) # TODO: Handle tool calls - return _ModelResponse( + return ModelResponse( response=response, tool_call_request=None, output_messages=output_messages, @@ -885,18 +748,14 @@ async def _ahandle_stream_response( response_id=response_id, ) - def _handle_choice_chunk( + def _handle_chunk( self, chunk: ChatCompletionChunk, content_dict: defaultdict, finish_reasons_dict: defaultdict, output_messages: List[BaseMessage], - ) -> Optional[BaseMessage]: - r"""Handle a chunk of the model response. - - Returns: - Optional[BaseMessage]: The message if the response is finished. - """ + ) -> None: + r"""Handle a chunk of the model response.""" for choice in chunk.choices: index = choice.index delta = choice.delta @@ -936,7 +795,7 @@ def _step_token_exceed( """ self.terminated = True - info = _get_info_dict( + info = get_info_dict( None, None, [termination_reason], @@ -952,7 +811,7 @@ def _step_token_exceed( def _execute_tool( self, - tool_call_request: _ToolCallRequest, + tool_call_request: ToolCallRequest, ) -> ToolCallingRecord: r"""Execute the tool with arguments following the model's response. @@ -973,7 +832,7 @@ def _execute_tool( async def _aexecute_tool( self, - tool_call_request: _ToolCallRequest, + tool_call_request: ToolCallRequest, ) -> ToolCallingRecord: r"""Execute the async tool with arguments following the model's response. diff --git a/camel/types/agents/__init__.py b/camel/types/agents/__init__.py new file mode 100644 index 0000000000..d8ece36cd4 --- /dev/null +++ b/camel/types/agents/__init__.py @@ -0,0 +1,3 @@ +from .tool_calling_record import ToolCallingRecord + +__all__ = ["ToolCallingRecord"] diff --git a/camel/types/agents/tool_calling_record.py b/camel/types/agents/tool_calling_record.py new file mode 100644 index 0000000000..e465cdb973 --- /dev/null +++ b/camel/types/agents/tool_calling_record.py @@ -0,0 +1,38 @@ +from typing import Any, Dict + +from pydantic import BaseModel + + +class ToolCallingRecord(BaseModel): + r"""Historical records of functions called in the conversation. + + Attributes: + func_name (str): The name of the function being called. + args (Dict[str, Any]): The dictionary of arguments passed to + the function. + result (Any): The execution result of calling this function. + """ + + func_name: str + args: Dict[str, Any] + result: Any + + def __str__(self) -> str: + r"""Overridden version of the string function. + + Returns: + str: Modified string to represent the function calling. + """ + return ( + f"Function Execution: {self.func_name}\n" + f"\tArgs: {self.args}\n" + f"\tResult: {self.result}" + ) + + def as_dict(self) -> Dict[str, Any]: + r"""Returns the function calling record as a dictionary. + + Returns: + dict[str, Any]: The function calling record as a dictionary. 
+ """ + return self.model_dump() From 00d1bf03b2f77b703aea88458292adeb53d5c9b9 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Fri, 10 Jan 2025 00:42:32 -0600 Subject: [PATCH 14/28] delete None values in config --- camel/agents/_types.py | 3 ++- camel/configs/base_config.py | 23 ++++++++++------------- camel/configs/openai_config.py | 27 +-------------------------- camel/models/openai_model.py | 10 ++++++---- 4 files changed, 19 insertions(+), 44 deletions(-) diff --git a/camel/agents/_types.py b/camel/agents/_types.py index 07e0033feb..75dc886b82 100644 --- a/camel/agents/_types.py +++ b/camel/agents/_types.py @@ -1,9 +1,10 @@ from typing import Any, Dict, List, Optional, Union +from openai import AsyncStream, Stream from pydantic import BaseModel, ConfigDict from camel.messages import BaseMessage -from camel.types import AsyncStream, ChatCompletion, Stream +from camel.types import ChatCompletion class ToolCallRequest(BaseModel): diff --git a/camel/configs/base_config.py b/camel/configs/base_config.py index 5a6e748195..bd15ec90bd 100644 --- a/camel/configs/base_config.py +++ b/camel/configs/base_config.py @@ -66,6 +66,8 @@ def as_dict(self) -> dict[str, Any]: This method converts the current configuration object to a dictionary representation, which can be used for serialization or other purposes. + The dictionary won't contain None values, as some API does not support + None values. (Like tool in OpenAI beta API) Returns: dict[str, Any]: A dictionary representation of the current @@ -73,17 +75,12 @@ def as_dict(self) -> dict[str, Any]: """ config_dict = self.model_dump() - tools_schema = None - if self.tools: - from camel.toolkits import FunctionTool + # Convert tools to OpenAI tool schema + config_dict["tools"] = ( + [tool.get_openai_tool_schema() for tool in self.tools] + if self.tools + else None + ) - tools_schema = [] - for tool in self.tools: - if not isinstance(tool, FunctionTool): - raise ValueError( - f"The tool {tool} should " - "be an instance of `FunctionTool`." - ) - tools_schema.append(tool.get_openai_tool_schema()) - config_dict["tools"] = tools_schema - return config_dict + # Remove None values + return {k: v for k, v in config_dict.items() if v is not None} diff --git a/camel/configs/openai_config.py b/camel/configs/openai_config.py index b787a3bf0e..8ee7fdca02 100644 --- a/camel/configs/openai_config.py +++ b/camel/configs/openai_config.py @@ -13,7 +13,7 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= from __future__ import annotations -from typing import Any, Dict, Optional, Sequence, Type, Union +from typing import Dict, Optional, Sequence, Type, Union from pydantic import BaseModel, Field @@ -109,30 +109,5 @@ class ChatGPTConfig(BaseConfig): user: str = "" tool_choice: Optional[Union[Dict[str, str], str]] = None - def as_dict(self) -> Dict[str, Any]: - r"""Convert the current configuration to a dictionary. - - This method converts the current configuration object to a dictionary - representation, which can be used for serialization or other purposes. - - Returns: - Dict[str, Any]: A dictionary representation of the current - configuration. - """ - from camel.toolkits import FunctionTool - - config_dict = self.model_dump() - if self.tools: - tools_schema = [] - for tool in self.tools: - if not isinstance(tool, FunctionTool): - raise ValueError( - f"The tool {tool} should " - "be an instance of `FunctionTool`." 
- ) - tools_schema.append(tool.get_openai_tool_schema()) - config_dict["tools"] = tools_schema - return config_dict - OPENAI_API_PARAMS = {param for param in ChatGPTConfig.model_fields.keys()} diff --git a/camel/models/openai_model.py b/camel/models/openai_model.py index 6ad2bbe75e..585b8a6935 100644 --- a/camel/models/openai_model.py +++ b/camel/models/openai_model.py @@ -193,7 +193,7 @@ def _request_chat_completion( for tool in tools: function_dict = tool.get('function', {}) function_dict.pop("strict", None) - request_config["tools"] = tools + request_config["tools"] = tools return self._client.chat.completions.create( messages=messages, @@ -212,7 +212,7 @@ async def _arequest_chat_completion( for tool in tools: function_dict = tool.get('function', {}) function_dict.pop("strict", None) - request_config["tools"] = tools + request_config["tools"] = tools return await self._async_client.chat.completions.create( messages=messages, @@ -230,7 +230,8 @@ def _request_parse( request_config["response_format"] = response_format request_config.pop("stream", None) - request_config["tools"] = tools + if tools is not None: + request_config["tools"] = tools return self._client.beta.chat.completions.parse( messages=messages, @@ -248,7 +249,8 @@ async def _arequest_parse( request_config["response_format"] = response_format request_config.pop("stream", None) - request_config["tools"] = tools + if tools is not None: + request_config["tools"] = tools return await self._async_client.beta.chat.completions.parse( messages=messages, From e8c8148346abf303ca5e9aaa66e4cc09bb5062af Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Sat, 11 Jan 2025 12:01:33 -0600 Subject: [PATCH 15/28] fix incompatible types --- camel/agents/_types.py | 13 +++++++++++++ camel/agents/multi_hop_generator_agent.py | 2 +- camel/models/anthropic_model.py | 2 +- camel/models/azure_openai_model.py | 8 ++++---- camel/models/base_model.py | 10 +++++----- camel/models/cohere_model.py | 2 +- camel/models/deepseek_model.py | 8 ++++---- camel/models/gemini_model.py | 8 ++++---- camel/models/groq_model.py | 8 ++++---- camel/models/model_manager.py | 8 ++++---- camel/models/nvidia_model.py | 8 ++++---- camel/models/ollama_model.py | 12 ++++++------ camel/models/openai_compatible_model.py | 8 ++++---- camel/models/openai_model.py | 10 +++++----- camel/models/qwen_model.py | 8 ++++---- camel/models/samba_model.py | 12 ++++++------ camel/models/sglang_model.py | 8 ++++---- camel/models/stub_model.py | 7 ++++--- camel/models/togetherai_model.py | 8 ++++---- camel/models/vllm_model.py | 8 ++++---- camel/models/yi_model.py | 8 ++++---- camel/models/zhipuai_model.py | 8 ++++---- camel/types/agents/__init__.py | 13 +++++++++++++ camel/types/agents/tool_calling_record.py | 13 +++++++++++++ 24 files changed, 120 insertions(+), 80 deletions(-) diff --git a/camel/agents/_types.py b/camel/agents/_types.py index 75dc886b82..57de46ecfa 100644 --- a/camel/agents/_types.py +++ b/camel/agents/_types.py @@ -1,3 +1,16 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= from typing import Any, Dict, List, Optional, Union from openai import AsyncStream, Stream diff --git a/camel/agents/multi_hop_generator_agent.py b/camel/agents/multi_hop_generator_agent.py index a232fce846..ed99bb4c82 100644 --- a/camel/agents/multi_hop_generator_agent.py +++ b/camel/agents/multi_hop_generator_agent.py @@ -56,7 +56,7 @@ def __init__(self, **kwargs: Any): Supporting Facts: [List of relevant text segments used] """ # noqa: E501 ) - self.system_message = BaseMessage.make_assistant_message( + self._system_message = BaseMessage.make_assistant_message( role_name='Assistant', content=system_text ) diff --git a/camel/models/anthropic_model.py b/camel/models/anthropic_model.py index 25e55e3d76..485d47e555 100644 --- a/camel/models/anthropic_model.py +++ b/camel/models/anthropic_model.py @@ -145,7 +145,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ): + ) -> ChatCompletion: r"""Run inference of Anthropic chat completion. Args: diff --git a/camel/models/azure_openai_model.py b/camel/models/azure_openai_model.py index 0b26769550..19a4831513 100644 --- a/camel/models/azure_openai_model.py +++ b/camel/models/azure_openai_model.py @@ -14,7 +14,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncAzureOpenAI, AzureOpenAI, Stream +from openai import AsyncAzureOpenAI, AsyncStream, AzureOpenAI, Stream from pydantic import BaseModel from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig @@ -146,7 +146,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of Azure OpenAI chat completion. Args: @@ -154,9 +154,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. """ response = await self._async_client.chat.completions.create( messages=messages, diff --git a/camel/models/base_model.py b/camel/models/base_model.py index 4c7b009852..7d7018b9a9 100644 --- a/camel/models/base_model.py +++ b/camel/models/base_model.py @@ -14,7 +14,7 @@ from abc import ABC, abstractmethod from typing import Any, Coroutine, Dict, List, Optional, Type, Union -from openai import Stream +from openai import AsyncStream, Stream from pydantic import BaseModel from camel.messages import OpenAIMessage @@ -90,7 +90,7 @@ def _arun( response_format: Optional[Type[BaseModel]], tools: Optional[List[Dict[str, Any]]], ) -> Coroutine[ - Any, Any, Union[ChatCompletion, Stream[ChatCompletionChunk]] + Any, Any, Union[ChatCompletion, AsyncStream[ChatCompletionChunk]] ]: pass @@ -130,7 +130,7 @@ async def arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs the query to the backend model asynchronously. 
Args: @@ -144,9 +144,9 @@ async def arun( (default: :obj:`None`) Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. """ response_format = ( self.model_config_dict.get("response_format", None) diff --git a/camel/models/cohere_model.py b/camel/models/cohere_model.py index e328d37751..ad5ff79dff 100644 --- a/camel/models/cohere_model.py +++ b/camel/models/cohere_model.py @@ -69,7 +69,7 @@ def __init__( model_type, model_config_dict, api_key, url, token_counter ) self._client = cohere.ClientV2(api_key=self._api_key) - self._async_client = cohere.AsyncClientV2(api_key=self._api) + self._async_client = cohere.AsyncClientV2(api_key=self._api_key) def _to_openai_response(self, response: 'ChatResponse') -> ChatCompletion: if response.usage and response.usage.tokens: diff --git a/camel/models/deepseek_model.py b/camel/models/deepseek_model.py index de1a0c6cc1..0670d175a1 100644 --- a/camel/models/deepseek_model.py +++ b/camel/models/deepseek_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig @@ -132,7 +132,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of DeepSeek chat completion. Args: @@ -140,9 +140,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletion + `AsyncStream[ChatCompletionChunk]` in the stream mode. """ response = await self._async_client.chat.completions.create( messages=messages, diff --git a/camel/models/gemini_model.py b/camel/models/gemini_model.py index 07d9f4c790..50b3ea3aef 100644 --- a/camel/models/gemini_model.py +++ b/camel/models/gemini_model.py @@ -14,7 +14,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import Gemini_API_PARAMS, GeminiConfig @@ -118,7 +118,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of Gemini chat completion asynchronously. Args: @@ -126,9 +126,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. 
""" response = await self._async_client.chat.completions.create( messages=messages, diff --git a/camel/models/groq_model.py b/camel/models/groq_model.py index 4616bf8c2f..691b8cdccd 100644 --- a/camel/models/groq_model.py +++ b/camel/models/groq_model.py @@ -14,7 +14,7 @@ import os from typing import Any, Dict, List, Optional, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import GROQ_API_PARAMS, GroqConfig @@ -131,7 +131,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. Args: @@ -139,9 +139,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. """ response = await self._async_client.chat.completions.create( messages=messages, diff --git a/camel/models/model_manager.py b/camel/models/model_manager.py index e96b81a8de..f072af250c 100644 --- a/camel/models/model_manager.py +++ b/camel/models/model_manager.py @@ -25,7 +25,7 @@ Union, ) -from openai import Stream +from openai import AsyncStream, Stream from pydantic import BaseModel from camel.messages import OpenAIMessage @@ -222,7 +222,7 @@ async def arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Process a list of messages by selecting a model based on the scheduling strategy. Sends the entire list of messages to the selected model, @@ -233,9 +233,9 @@ async def arun( history in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. """ self.current_model = self.scheduling_strategy() diff --git a/camel/models/nvidia_model.py b/camel/models/nvidia_model.py index 57d617b26c..e394e696c3 100644 --- a/camel/models/nvidia_model.py +++ b/camel/models/nvidia_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from openai.types.chat import ( ChatCompletion, ChatCompletionChunk, @@ -89,7 +89,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of NVIDIA chat completion. Args: @@ -97,9 +97,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. 
+ `AsyncStream[ChatCompletionChunk]` in the stream mode. """ # Remove tool-related parameters if no tools are specified diff --git a/camel/models/ollama_model.py b/camel/models/ollama_model.py index 8d5f88f037..1d38023ae9 100644 --- a/camel/models/ollama_model.py +++ b/camel/models/ollama_model.py @@ -15,7 +15,7 @@ import subprocess from typing import Any, Dict, List, Optional, Type, Union -from openai import OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import OLLAMA_API_PARAMS, OllamaConfig @@ -76,7 +76,7 @@ def __init__( api_key="Set-but-ignored", # required but ignored base_url=self._url, ) - self._async_client = OpenAI( + self._async_client = AsyncOpenAI( timeout=180, max_retries=3, api_key="Set-but-ignored", # required but ignored @@ -131,7 +131,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. Args: @@ -139,16 +139,16 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. """ if self.model_config_dict.get("response_format"): # stream is not supported in beta.chat.completions.parse if "stream" in self.model_config_dict: del self.model_config_dict["stream"] - response = self._async_client.beta.chat.completions.parse( + response = await self._async_client.beta.chat.completions.parse( messages=messages, model=self.model_type, **self.model_config_dict, diff --git a/camel/models/openai_compatible_model.py b/camel/models/openai_compatible_model.py index a74f335d3f..d77cded87f 100644 --- a/camel/models/openai_compatible_model.py +++ b/camel/models/openai_compatible_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.messages import OpenAIMessage @@ -104,7 +104,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion in async mode. Args: @@ -112,9 +112,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. 
""" response = await self._async_client.chat.completions.create( messages=messages, diff --git a/camel/models/openai_model.py b/camel/models/openai_model.py index 585b8a6935..541c1d4419 100644 --- a/camel/models/openai_model.py +++ b/camel/models/openai_model.py @@ -15,7 +15,7 @@ import warnings from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig @@ -161,7 +161,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]], tools: Optional[List[Dict[str, Any]]], - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion in async mode. Args: @@ -173,9 +173,9 @@ async def _arun( use for the request. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. """ if response_format: return await self._arequest_parse(messages, response_format, tools) @@ -205,7 +205,7 @@ async def _arequest_chat_completion( self, messages: List[OpenAIMessage], tools: Optional[List[Dict[str, Any]]], - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: request_config = self.model_config_dict.copy() if tools is not None: diff --git a/camel/models/qwen_model.py b/camel/models/qwen_model.py index 4275376328..2a8607a0af 100644 --- a/camel/models/qwen_model.py +++ b/camel/models/qwen_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import QWEN_API_PARAMS, QwenConfig @@ -94,7 +94,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of Qwen chat completion. Args: @@ -102,9 +102,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. 
""" response = await self._async_client.chat.completions.create( messages=messages, diff --git a/camel/models/samba_model.py b/camel/models/samba_model.py index f89ed11929..969df2d48f 100644 --- a/camel/models/samba_model.py +++ b/camel/models/samba_model.py @@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Type, Union import httpx -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import ( @@ -160,7 +160,7 @@ async def _arun( # type: ignore[misc] messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs SambaNova's service. Args: @@ -168,9 +168,9 @@ async def _arun( # type: ignore[misc] in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. """ if "tools" in self.model_config_dict: del self.model_config_dict["tools"] @@ -435,7 +435,7 @@ def stream(self) -> bool: async def _arun_streaming( self, messages: List[OpenAIMessage] - ) -> Stream[ChatCompletionChunk]: + ) -> AsyncStream[ChatCompletionChunk]: r"""Handles streaming inference with SambaNova's API. Args: @@ -443,7 +443,7 @@ async def _arun_streaming( chat history in OpenAI API format. Returns: - Stream[ChatCompletionChunk]: A generator yielding + AsyncStream[ChatCompletionChunk]: A generator yielding `ChatCompletionChunk` objects as they are received from the API. diff --git a/camel/models/sglang_model.py b/camel/models/sglang_model.py index 30db603e25..14a4dd57b5 100644 --- a/camel/models/sglang_model.py +++ b/camel/models/sglang_model.py @@ -16,7 +16,7 @@ import time from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import SGLANG_API_PARAMS, SGLangConfig @@ -190,7 +190,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. Args: @@ -198,9 +198,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. 
""" # Ensure server is running diff --git a/camel/models/stub_model.py b/camel/models/stub_model.py index 8edd87dce5..066dc9e57b 100644 --- a/camel/models/stub_model.py +++ b/camel/models/stub_model.py @@ -14,7 +14,7 @@ import time from typing import Any, Dict, List, Optional, Type, Union -from openai import Stream +from openai import AsyncStream, Stream from pydantic import BaseModel from camel.messages import OpenAIMessage @@ -80,12 +80,13 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Run fake inference by returning a fixed string. All arguments are unused for the dummy model. Returns: - Dict[str, Any]: Response in the OpenAI API format. + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: + The response from the dummy model. """ ARBITRARY_STRING = "Lorem Ipsum" response: ChatCompletion = ChatCompletion( diff --git a/camel/models/togetherai_model.py b/camel/models/togetherai_model.py index a32796c046..8601e697c2 100644 --- a/camel/models/togetherai_model.py +++ b/camel/models/togetherai_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import TOGETHERAI_API_PARAMS, TogetherAIConfig @@ -95,7 +95,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. Args: @@ -103,9 +103,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. """ # Use OpenAI cilent as interface call Together AI # Reference: https://docs.together.ai/docs/openai-api-compatibility diff --git a/camel/models/vllm_model.py b/camel/models/vllm_model.py index 4dd9204200..353655e6f9 100644 --- a/camel/models/vllm_model.py +++ b/camel/models/vllm_model.py @@ -15,7 +15,7 @@ import subprocess from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import VLLM_API_PARAMS, VLLMConfig @@ -133,7 +133,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. Args: @@ -141,9 +141,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. 
""" response = await self._async_client.chat.completions.create( diff --git a/camel/models/yi_model.py b/camel/models/yi_model.py index 5ec6ea84e8..4478104561 100644 --- a/camel/models/yi_model.py +++ b/camel/models/yi_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import YI_API_PARAMS, YiConfig @@ -93,7 +93,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of Yi chat completion. Args: @@ -101,9 +101,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. """ response = await self._async_client.chat.completions.create( messages=messages, diff --git a/camel/models/zhipuai_model.py b/camel/models/zhipuai_model.py index e3b4c0b7e5..f737cb2f82 100644 --- a/camel/models/zhipuai_model.py +++ b/camel/models/zhipuai_model.py @@ -15,7 +15,7 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncOpenAI, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import ZHIPUAI_API_PARAMS, ZhipuAIConfig @@ -93,7 +93,7 @@ async def _arun( messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]] = None, tools: Optional[List[Dict[str, Any]]] = None, - ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. Args: @@ -101,9 +101,9 @@ async def _arun( in OpenAI API format. Returns: - Union[ChatCompletion, Stream[ChatCompletionChunk]]: + Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or - `Stream[ChatCompletionChunk]` in the stream mode. + `AsyncStream[ChatCompletionChunk]` in the stream mode. """ # Use OpenAI cilent as interface call ZhipuAI # Reference: https://open.bigmodel.cn/dev/api#openai_sdk diff --git a/camel/types/agents/__init__.py b/camel/types/agents/__init__.py index d8ece36cd4..da547307e1 100644 --- a/camel/types/agents/__init__.py +++ b/camel/types/agents/__init__.py @@ -1,3 +1,16 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= from .tool_calling_record import ToolCallingRecord __all__ = ["ToolCallingRecord"] diff --git a/camel/types/agents/tool_calling_record.py b/camel/types/agents/tool_calling_record.py index e465cdb973..7be7f45db3 100644 --- a/camel/types/agents/tool_calling_record.py +++ b/camel/types/agents/tool_calling_record.py @@ -1,3 +1,16 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= from typing import Any, Dict from pydantic import BaseModel From 520c29a2dca72ea9be8d435955c9d89699d7c7f1 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Mon, 13 Jan 2025 12:17:05 -0600 Subject: [PATCH 16/28] add function to qwen --- camel/agents/chat_agent.py | 12 ++++++------ camel/configs/qwen_config.py | 27 +++++++++++++------------ camel/models/base_model.py | 7 +------ camel/models/openai_model.py | 10 ++++++++-- camel/models/qwen_model.py | 38 +++++++++++++++++++++++++++++++++--- examples/simple_agent.py | 32 ++++++++++++++++++++++++++++++ 6 files changed, 96 insertions(+), 30 deletions(-) create mode 100644 examples/simple_agent.py diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 7d1884792b..628c56e3bc 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -379,9 +379,9 @@ def step( break - self._log_final_output(response.output_messages) + self._record_final_output(response.output_messages) - return self._parse_chatagent_response( + return self._convert_to_chatagent_response( response, tool_call_records, num_tokens ) @@ -441,13 +441,13 @@ async def astep( break - self._log_final_output(response.output_messages) + self._record_final_output(response.output_messages) - return self._parse_chatagent_response( + return self._convert_to_chatagent_response( response, tool_call_records, num_tokens ) - def _parse_chatagent_response( + def _convert_to_chatagent_response( self, response: ModelResponse, tool_call_records: List[ToolCallingRecord], @@ -469,7 +469,7 @@ def _parse_chatagent_response( info=info, ) - def _log_final_output(self, output_messages: List[BaseMessage]) -> None: + def _record_final_output(self, output_messages: List[BaseMessage]) -> None: r"""Log final messages or warnings about multiple responses.""" if len(output_messages) == 1: self.record_message(output_messages[0]) diff --git a/camel/configs/qwen_config.py b/camel/configs/qwen_config.py index 91a962a780..8698b5875d 100644 --- a/camel/configs/qwen_config.py +++ b/camel/configs/qwen_config.py @@ -13,10 +13,11 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= from __future__ import annotations -from typing import ClassVar, Optional, Union +from typing import Dict, List, Optional, Union + +from pydantic import Field from camel.configs.base_config import BaseConfig -from camel.types import NOT_GIVEN, NotGiven class QwenConfig(BaseConfig): @@ -52,16 +53,16 @@ class QwenConfig(BaseConfig): keeping other parameters unchanged, the model is likely to return the same result. (default: :obj:`None`) - stop (str or list, optional): Using the stop parameter, the model will - automatically stop generating text when it is about to include the - specified string or token_id. You can use the stop parameter to - control the output of the model by passing sensitive words. - (default: :obj:`None`) - tools (list, optional): Specifies an array of tools that the model can + stop (Union[str, List], optional): Using the stop parameter, the model + will automatically stop generating text when it is about to + include the specified string or token_id. You can use the stop + parameter to control the output of the model by passing sensitive + words. (default: :obj:`None`) + tools (List, optional): Specifies an array of tools that the model can call. It can contain one or more tool objects. During a function call process, the model will select one tool from the array. (default: :obj:`None`) - extra_body (dict, optional): Additional parameters to be sent to the + extra_body (Dict, optional): Additional parameters to be sent to the Qwen API. If you want to enable internet search, you can set this parameter to `{"enable_search": True}`. (default: :obj:`{"enable_search": False}`) @@ -74,11 +75,11 @@ class QwenConfig(BaseConfig): temperature: float = 0.3 top_p: float = 0.9 presence_penalty: float = 0.0 - response_format: ClassVar[dict] = {"type": "text"} - max_tokens: Union[int, NotGiven] = NOT_GIVEN + response_format: Dict = Field(default_factory=lambda: {"type": "text"}) + max_tokens: Optional[int] = None seed: Optional[int] = None - stop: Optional[Union[str, list]] = None - extra_body: ClassVar[dict] = {"enable_search": False} + stop: Optional[Union[str, List]] = None + extra_body: Dict = Field(default_factory=lambda: {"enable_search": False}) def __init__(self, include_usage: bool = True, **kwargs): super().__init__(**kwargs) diff --git a/camel/models/base_model.py b/camel/models/base_model.py index 7d7018b9a9..dabb383b16 100644 --- a/camel/models/base_model.py +++ b/camel/models/base_model.py @@ -117,12 +117,7 @@ def run( `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. """ - response_format = ( - self.model_config_dict.get("response_format", None) - or response_format - ) - # If tools are empty, make it None - tools = self.model_config_dict.get("tools", None) or tools or None + tools = tools or self.model_config_dict.get("tools", None) return self._run(messages, response_format, tools) async def arun( diff --git a/camel/models/openai_model.py b/camel/models/openai_model.py index 541c1d4419..607eb677f5 100644 --- a/camel/models/openai_model.py +++ b/camel/models/openai_model.py @@ -151,6 +151,9 @@ def _run( `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. """ + response_format = response_format or self.model_config_dict.get( + "response_format", None + ) if response_format: return self._request_parse(messages, response_format, tools) else: @@ -177,6 +180,9 @@ async def _arun( `ChatCompletion` in the non-stream mode, or `AsyncStream[ChatCompletionChunk]` in the stream mode. 
""" + response_format = response_format or self.model_config_dict.get( + "response_format", None + ) if response_format: return await self._arequest_parse(messages, response_format, tools) else: @@ -189,7 +195,7 @@ def _request_chat_completion( ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: request_config = self.model_config_dict.copy() - if tools is not None: + if tools: for tool in tools: function_dict = tool.get('function', {}) function_dict.pop("strict", None) @@ -208,7 +214,7 @@ async def _arequest_chat_completion( ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: request_config = self.model_config_dict.copy() - if tools is not None: + if tools: for tool in tools: function_dict = tool.get('function', {}) function_dict.pop("strict", None) diff --git a/camel/models/qwen_model.py b/camel/models/qwen_model.py index 2a8607a0af..e7276500cf 100644 --- a/camel/models/qwen_model.py +++ b/camel/models/qwen_model.py @@ -116,8 +116,8 @@ async def _arun( def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[Dict[str, Any]]] = None, + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Qwen chat completion. @@ -130,13 +130,45 @@ def _run( `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. """ + request_config = self._prepare_request( + messages, response_format, tools + ) + response = self._client.chat.completions.create( messages=messages, model=self.model_type, - **self.model_config_dict, + **request_config, ) return response + def _prepare_request( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], + ) -> Dict[str, Any]: + request_config = self.model_config_dict.copy() + + if tools: + request_config["tools"] = tools + + if response_format is None: + return request_config + + # get all keys of the response_format + response_format_keys = response_format.model_fields.keys() + additional_prompt = ( + "The response should be in JSON format with the following keys: " + f"{', '.join(response_format_keys)}." + ) + user_message = messages[-1] + user_message["content"] = ( + f"{user_message['content']}\n{additional_prompt}" + ) + + request_config["response_format"] = {"type": "json_object"} + return request_config + @property def token_counter(self) -> BaseTokenCounter: r"""Initialize the token counter for the model backend. 
diff --git a/examples/simple_agent.py b/examples/simple_agent.py new file mode 100644 index 0000000000..a122870b43 --- /dev/null +++ b/examples/simple_agent.py @@ -0,0 +1,32 @@ +from pydantic import BaseModel + +from camel.agents import ChatAgent +from camel.models import ModelFactory +from camel.toolkits import WeatherToolkit +from camel.types import ModelPlatformType, ModelType + +model = ModelFactory.create( + model_platform=ModelPlatformType.QWEN, + model_type=ModelType.QWEN_TURBO, +) + + +class ResponseFormat(BaseModel): + weather: str + time: str + + +agent = ChatAgent(model=model, tools=[WeatherToolkit().get_weather_data]) + +resp = agent.step( + "What's the current weather in New York?", + response_format=ResponseFormat, +) +print(resp.msg.content) + + +# resp = agent.step( +# "Format your last response.", +# response_format=ResponseFormat, +# ) +# print(resp.msg.content) From 8faf8574791c8d50326cfd16fcbc82b3a7bda24b Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Mon, 13 Jan 2025 12:42:54 -0600 Subject: [PATCH 17/28] add function to qwen --- camel/models/_utils.py | 52 +++++++++++++++++++++++++++++++++ camel/models/anthropic_model.py | 4 +-- camel/models/qwen_model.py | 27 ++++++++--------- camel/utils/__init__.py | 2 -- camel/utils/commons.py | 36 ----------------------- examples/simple_agent.py | 22 ++++++++++---- 6 files changed, 82 insertions(+), 61 deletions(-) create mode 100644 camel/models/_utils.py diff --git a/camel/models/_utils.py b/camel/models/_utils.py new file mode 100644 index 0000000000..47978a945b --- /dev/null +++ b/camel/models/_utils.py @@ -0,0 +1,52 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +import textwrap +from typing import Optional, Type + +from pydantic import BaseModel + + +def get_prompt_with_response_format( + response_format: Optional[Type[BaseModel]], + user_message: str, +) -> str: + """ + This function generates a prompt based on the provided Pydantic model and + user message. + + Args: + response_format (Optional[Type[BaseModel]]): The Pydantic model class. + user_message (str): The user message to be used in the prompt. + + Returns: + str: A prompt string for the LLM. + """ + if response_format is None: + return user_message + + json_schema = response_format.model_json_schema() + updated_prompt = textwrap.dedent( + f"""\ + Given the user message, please generate a JSON response adhering + to the following JSON schema: + {json_schema} + Make sure the JSON response is valid and matches the EXACT structure + defined in the schema. Your result should only be a valid json + object, without any other text or comments. 
+ + Following is the original user message: + {user_message} + """ + ) + return updated_prompt diff --git a/camel/models/anthropic_model.py b/camel/models/anthropic_model.py index 485d47e555..6717d6a642 100644 --- a/camel/models/anthropic_model.py +++ b/camel/models/anthropic_model.py @@ -169,9 +169,7 @@ async def _arun( ) # format response to openai format - response = self._convert_response_from_anthropic_to_openai(response) - - return response + return self._convert_response_from_anthropic_to_openai(response) def check_model_config(self): r"""Check whether the model configuration is valid for anthropic diff --git a/camel/models/qwen_model.py b/camel/models/qwen_model.py index e7276500cf..311e0ac4af 100644 --- a/camel/models/qwen_model.py +++ b/camel/models/qwen_model.py @@ -21,6 +21,7 @@ from camel.configs import QWEN_API_PARAMS, QwenConfig from camel.messages import OpenAIMessage from camel.models import BaseModelBackend +from camel.models._utils import get_prompt_with_response_format from camel.types import ( ChatCompletion, ChatCompletionChunk, @@ -148,25 +149,21 @@ def _prepare_request( tools: Optional[List[Dict[str, Any]]], ) -> Dict[str, Any]: request_config = self.model_config_dict.copy() + user_message = messages[-1] - if tools: - request_config["tools"] = tools - - if response_format is None: - return request_config + if not isinstance(user_message["content"], str): + raise ValueError("Only text messages are supported") - # get all keys of the response_format - response_format_keys = response_format.model_fields.keys() - additional_prompt = ( - "The response should be in JSON format with the following keys: " - f"{', '.join(response_format_keys)}." - ) - user_message = messages[-1] - user_message["content"] = ( - f"{user_message['content']}\n{additional_prompt}" + user_message["content"] = get_prompt_with_response_format( + response_format, user_message["content"] ) + if tools: + request_config["tools"] = tools + elif response_format: + # Improve stability with native response format support + # This config will be unstable if being used with tools + request_config["response_format"] = {"type": "json_object"} - request_config["response_format"] = {"type": "json_object"} return request_config @property diff --git a/camel/utils/__init__.py b/camel/utils/__init__.py index 2215d0d731..5b5d4d8e19 100644 --- a/camel/utils/__init__.py +++ b/camel/utils/__init__.py @@ -22,7 +22,6 @@ download_github_subdirectory, download_tasks, func_string_to_callable, - generate_prompt_for_structured_output, get_first_int, get_prompt_template_key_words, get_pydantic_major_version, @@ -81,5 +80,4 @@ "handle_http_error", "get_pydantic_model", "download_github_subdirectory", - "generate_prompt_for_structured_output", ] diff --git a/camel/utils/commons.py b/camel/utils/commons.py index a131f41770..dff522f8c9 100644 --- a/camel/utils/commons.py +++ b/camel/utils/commons.py @@ -692,39 +692,3 @@ def download_github_subdirectory( download_github_subdirectory( repo, f'{subdir}/{file["name"]}', file_path, branch ) - - -def generate_prompt_for_structured_output( - response_format: Optional[Type[BaseModel]], - user_message: str, -) -> str: - """ - This function generates a prompt based on the provided Pydantic model and - user message. - - Args: - response_format (Type[BaseModel]): The Pydantic model class. - user_message (str): The user message to be used in the prompt. - - Returns: - str: A prompt string for the LLM. 
- """ - if response_format is None: - return user_message - - json_schema = response_format.model_json_schema() - sys_prompt = ( - "Given the user message, please generate a JSON response adhering " - "to the following JSON schema:\n" - f"{json_schema}\n" - "Make sure the JSON response is valid and matches the EXACT structure " - "defined in the schema. Your result should only be a valid json " - "object, without any other text or comments.\n" - ) - user_prompt = f"User message: {user_message}\n" - - final_prompt = f""" - {sys_prompt} - {user_prompt} - """ - return final_prompt diff --git a/examples/simple_agent.py b/examples/simple_agent.py index a122870b43..3f01abbeaf 100644 --- a/examples/simple_agent.py +++ b/examples/simple_agent.py @@ -1,8 +1,20 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= from pydantic import BaseModel from camel.agents import ChatAgent from camel.models import ModelFactory -from camel.toolkits import WeatherToolkit from camel.types import ModelPlatformType, ModelType model = ModelFactory.create( @@ -12,14 +24,14 @@ class ResponseFormat(BaseModel): - weather: str - time: str + celsius: str + fahrenheit: str -agent = ChatAgent(model=model, tools=[WeatherToolkit().get_weather_data]) +agent = ChatAgent(model=model) resp = agent.step( - "What's the current weather in New York?", + "At what tempreature does water boil?", response_format=ResponseFormat, ) print(resp.msg.content) From 2c96e488aeb9ff01d1abba68df65247ed44107b7 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Mon, 13 Jan 2025 13:21:23 -0600 Subject: [PATCH 18/28] add qwen support --- camel/models/_utils.py | 43 +++++++++++++++++++------------------- camel/models/base_model.py | 6 +----- camel/models/qwen_model.py | 11 ++-------- examples/simple_agent.py | 5 +++-- 4 files changed, 28 insertions(+), 37 deletions(-) diff --git a/camel/models/_utils.py b/camel/models/_utils.py index 47978a945b..8e1d1fb149 100644 --- a/camel/models/_utils.py +++ b/camel/models/_utils.py @@ -16,37 +16,38 @@ from pydantic import BaseModel +from camel.messages import OpenAIMessage -def get_prompt_with_response_format( + +def try_modify_message_with_format( + message: OpenAIMessage, response_format: Optional[Type[BaseModel]], - user_message: str, -) -> str: - """ - This function generates a prompt based on the provided Pydantic model and - user message. +) -> None: + r"""Modifies the content of the message to include the instruction of using + the response format. + + The message will not be modified in the following cases: + - response_format is None + - message content is not a string Args: response_format (Optional[Type[BaseModel]]): The Pydantic model class. - user_message (str): The user message to be used in the prompt. - - Returns: - str: A prompt string for the LLM. + message (OpenAIMessage): The message to be modified. 
""" if response_format is None: - return user_message + return + + if not isinstance(message["content"], str): + return json_schema = response_format.model_json_schema() updated_prompt = textwrap.dedent( f"""\ - Given the user message, please generate a JSON response adhering - to the following JSON schema: - {json_schema} - Make sure the JSON response is valid and matches the EXACT structure - defined in the schema. Your result should only be a valid json - object, without any other text or comments. + {message["content"]} - Following is the original user message: - {user_message} - """ + Please generate a JSON response adhering to the following JSON schema: + {json_schema} + Make sure the JSON response is valid and matches the EXACT structure defined in the schema. Your result should only be a valid json object, without any other text or comments. + """ # noqa: E501 ) - return updated_prompt + message["content"] = updated_prompt diff --git a/camel/models/base_model.py b/camel/models/base_model.py index dabb383b16..44dca8de6c 100644 --- a/camel/models/base_model.py +++ b/camel/models/base_model.py @@ -143,12 +143,8 @@ async def arun( `ChatCompletion` in the non-stream mode, or `AsyncStream[ChatCompletionChunk]` in the stream mode. """ - response_format = ( - self.model_config_dict.get("response_format", None) - or response_format - ) # If tools are empty, make it None - tools = self.model_config_dict.get("tools", None) or tools or None + tools = tools or self.model_config_dict.get("tools", None) return await self._arun(messages, response_format, tools) @abstractmethod diff --git a/camel/models/qwen_model.py b/camel/models/qwen_model.py index 311e0ac4af..2dbdcebf6c 100644 --- a/camel/models/qwen_model.py +++ b/camel/models/qwen_model.py @@ -21,7 +21,7 @@ from camel.configs import QWEN_API_PARAMS, QwenConfig from camel.messages import OpenAIMessage from camel.models import BaseModelBackend -from camel.models._utils import get_prompt_with_response_format +from camel.models._utils import try_modify_message_with_format from camel.types import ( ChatCompletion, ChatCompletionChunk, @@ -149,14 +149,7 @@ def _prepare_request( tools: Optional[List[Dict[str, Any]]], ) -> Dict[str, Any]: request_config = self.model_config_dict.copy() - user_message = messages[-1] - - if not isinstance(user_message["content"], str): - raise ValueError("Only text messages are supported") - - user_message["content"] = get_prompt_with_response_format( - response_format, user_message["content"] - ) + try_modify_message_with_format(messages[-1], response_format) if tools: request_config["tools"] = tools elif response_format: diff --git a/examples/simple_agent.py b/examples/simple_agent.py index 3f01abbeaf..ec0b9c2144 100644 --- a/examples/simple_agent.py +++ b/examples/simple_agent.py @@ -15,6 +15,7 @@ from camel.agents import ChatAgent from camel.models import ModelFactory +from camel.toolkits import WeatherToolkit from camel.types import ModelPlatformType, ModelType model = ModelFactory.create( @@ -28,10 +29,10 @@ class ResponseFormat(BaseModel): fahrenheit: str -agent = ChatAgent(model=model) +agent = ChatAgent(model=model, tools=[WeatherToolkit().get_weather_data]) resp = agent.step( - "At what tempreature does water boil?", + "What's the temperature in Beijing?", response_format=ResponseFormat, ) print(resp.msg.content) From 995bbb3b6c3ef94abd3bd9f59eb6b0f1da46c5ab Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Thu, 16 Jan 2025 21:34:04 -0600 Subject: [PATCH 19/28] add support for mistral --- 
camel/agents/chat_agent.py | 5 ++ camel/configs/qwen_config.py | 63 +++++++++---------- camel/models/_utils.py | 4 ++ camel/models/mistral_model.py | 34 ++++++++-- examples/minimum_agents/mistral.py | 44 +++++++++++++ .../qwen.py} | 9 +-- 6 files changed, 117 insertions(+), 42 deletions(-) create mode 100644 examples/minimum_agents/mistral.py rename examples/{simple_agent.py => minimum_agents/qwen.py} (89%) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 628c56e3bc..ebeb47f4fb 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -385,6 +385,11 @@ def step( response, tool_call_records, num_tokens ) + @property + def chat_history(self) -> List[OpenAIMessage]: + openai_messages, _ = self.memory.get_context() + return openai_messages + async def astep( self, input_message: Union[BaseMessage, str], diff --git a/camel/configs/qwen_config.py b/camel/configs/qwen_config.py index 8698b5875d..646174c04a 100644 --- a/camel/configs/qwen_config.py +++ b/camel/configs/qwen_config.py @@ -15,8 +15,6 @@ from typing import Dict, List, Optional, Union -from pydantic import Field - from camel.configs.base_config import BaseConfig @@ -28,58 +26,59 @@ class QwenConfig(BaseConfig): Args: stream (bool, optional): Whether to stream the response. (default: :obj:`False`) - temperature (float, optional): Controls the diversity and focus of - the generated results. Lower values make the output more focused, - while higher values make it more diverse. (default: :obj:`0.3`) - top_p (float, optional): Controls the diversity and focus of the - generated results. Higher values make the output more diverse, + temperature (float, optional): Controls the diversity and + focus of the generated results. Lower values make the output more + focused, while higher values make it more diverse. + (default: :obj:`0.3`) + top_p (float, optional): Controls the diversity and focus of + the generated results. Higher values make the output more diverse, while lower values make it more focused. (default: :obj:`0.9`) - presence_penalty (float, optional): Controls the repetition of + presence_penalty (float, optional): Controls the repetition content in the generated results. Positive values reduce the repetition of content, while negative values increase it. (default: :obj:`0.0`) - response_format (object, optional): Specifies the format of the - returned content. The available values are `{"type": "text"}` or - `{"type": "json_object"}`. Setting it to `{"type": "json_object"}` - will output a standard JSON string. - (default: :obj:`{"type": "text"}`) - max_tokens (Union[int, NotGiven], optional): Allows the model to + response_format (Optional[Dict[str, str]], optional): Specifies the + format of the returned content. The available values are + `{"type": "text"}` or `{"type": "json_object"}`. Setting it to + `{"type": "json_object"}` will output a standard JSON string. + (default: :obj:`None`) + max_tokens (Optional[int], optional): Allows the model to generate the maximum number of tokens. - (default: :obj:`NOT_GIVEN`) - seed (int, optional): Sets the seed parameter to make the text - generation process more deterministic, typically used to ensure - that the results are consistent across model runs. By passing the - same seed value (specified by you) in each model call while - keeping other parameters unchanged, the model is likely to return - the same result. 
(default: :obj:`None`) - stop (Union[str, List], optional): Using the stop parameter, the model - will automatically stop generating text when it is about to - include the specified string or token_id. You can use the stop + seed (Optional[int], optional): Sets the seed parameter to make the + text generation process more deterministic, typically used to + ensure that the results are consistent across model runs. By + passing the same seed value (specified by you) in each model call + while keeping other parameters unchanged, the model is likely to + return the same result. + (default: :obj:`None`) + stop (Optional[Union[str, List]], optional): Using the stop parameter, + the model will automatically stop generating text when it is about + to include the specified string or token_id. You can use the stop parameter to control the output of the model by passing sensitive words. (default: :obj:`None`) tools (List, optional): Specifies an array of tools that the model can call. It can contain one or more tool objects. During a function call process, the model will select one tool from the array. (default: :obj:`None`) - extra_body (Dict, optional): Additional parameters to be sent to the - Qwen API. If you want to enable internet search, you can set this - parameter to `{"enable_search": True}`. - (default: :obj:`{"enable_search": False}`) + extra_body (Optional[Dict[str, str]], optional): Additional parameters + to be sent to the Qwen API. If you want to enable internet search, + you can set this parameter to `{"enable_search": True}`. + (default: :obj:`None`) include_usage (bool, optional): When streaming, specifies whether to - include usage information in `stream_options`. (default: - :obj:`True`) + include usage information in `stream_options`. + (default: :obj:`True`) """ stream: bool = False temperature: float = 0.3 top_p: float = 0.9 presence_penalty: float = 0.0 - response_format: Dict = Field(default_factory=lambda: {"type": "text"}) + response_format: Optional[Dict[str, str]] = None max_tokens: Optional[int] = None seed: Optional[int] = None stop: Optional[Union[str, List]] = None - extra_body: Dict = Field(default_factory=lambda: {"enable_search": False}) + extra_body: Optional[Dict[str, str]] = None def __init__(self, include_usage: bool = True, **kwargs): super().__init__(**kwargs) diff --git a/camel/models/_utils.py b/camel/models/_utils.py index 8e1d1fb149..e8b32618bd 100644 --- a/camel/models/_utils.py +++ b/camel/models/_utils.py @@ -29,6 +29,7 @@ def try_modify_message_with_format( The message will not be modified in the following cases: - response_format is None - message content is not a string + - message role is assistant Args: response_format (Optional[Type[BaseModel]]): The Pydantic model class. 
@@ -40,6 +41,9 @@ def try_modify_message_with_format( if not isinstance(message["content"], str): return + if message["role"] == "assistant": + return + json_schema = response_format.model_json_schema() updated_prompt = textwrap.dedent( f"""\ diff --git a/camel/models/mistral_model.py b/camel/models/mistral_model.py index 598cc59598..3fb52e6fb1 100644 --- a/camel/models/mistral_model.py +++ b/camel/models/mistral_model.py @@ -25,6 +25,7 @@ from camel.configs import MISTRAL_API_PARAMS, MistralConfig from camel.messages import OpenAIMessage from camel.models import BaseModelBackend +from camel.models._utils import try_modify_message_with_format from camel.types import ChatCompletion, ModelType from camel.utils import ( BaseTokenCounter, @@ -213,24 +214,31 @@ async def _arun(self) -> None: # type: ignore[override] def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[Dict[str, Any]]] = None, + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], ) -> ChatCompletion: r"""Runs inference of Mistral chat completion. Args: messages (List[OpenAIMessage]): Message list with the chat history in OpenAI API format. + response_format (Optional[Type[BaseModel]]): The format of the + response for this query. + tools (Optional[List[Dict[str, Any]]]): The tools to use for this + query. Returns: - ChatCompletion. + ChatCompletion: The response from the model. """ + request_config = self._prepare_request( + messages, response_format, tools + ) mistral_messages = self._to_mistral_chatmessage(messages) response = self._client.chat.complete( messages=mistral_messages, model=self.model_type, - **self.model_config_dict, + **request_config, ) openai_response = self._to_openai_response(response) # type: ignore[arg-type] @@ -251,6 +259,24 @@ def _run( return openai_response + def _prepare_request( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], + ) -> Dict[str, Any]: + # TODO: very unstable if tools and response_format are both used + request_config = self.model_config_dict.copy() + try_modify_message_with_format(messages[-1], response_format) + if tools: + request_config["tools"] = tools + elif response_format: + # Improve stability with native response format support + # This config will not be allowed if used with tools + request_config["response_format"] = {"type": "json_object"} + + return request_config + def check_model_config(self): r"""Check whether the model configuration contains any unexpected arguments to Mistral API. diff --git a/examples/minimum_agents/mistral.py b/examples/minimum_agents/mistral.py new file mode 100644 index 0000000000..787193eaf2 --- /dev/null +++ b/examples/minimum_agents/mistral.py @@ -0,0 +1,44 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= + +from pydantic import BaseModel + +from camel.agents import ChatAgent +from camel.models import ModelFactory +from camel.toolkits import WeatherToolkit +from camel.types import ModelPlatformType, ModelType + +model = ModelFactory.create( + model_platform=ModelPlatformType.MISTRAL, + model_type=ModelType.MISTRAL_8B, +) + + +class ResponseFormat(BaseModel): + celsius: str + fahrenheit: str + + +agent = ChatAgent(model=model, tools=[WeatherToolkit().get_weather_data]) + +resp = agent.step( + "What's the temperature in New York?", + response_format=ResponseFormat, +) + + +print(resp.msg.content) + +for message in agent.chat_history: + print(f"{message['role']}: {message['content']}") diff --git a/examples/simple_agent.py b/examples/minimum_agents/qwen.py similarity index 89% rename from examples/simple_agent.py rename to examples/minimum_agents/qwen.py index ec0b9c2144..9a1531905c 100644 --- a/examples/simple_agent.py +++ b/examples/minimum_agents/qwen.py @@ -11,6 +11,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +import json + from pydantic import BaseModel from camel.agents import ChatAgent @@ -35,11 +37,6 @@ class ResponseFormat(BaseModel): "What's the temperature in Beijing?", response_format=ResponseFormat, ) -print(resp.msg.content) -# resp = agent.step( -# "Format your last response.", -# response_format=ResponseFormat, -# ) -# print(resp.msg.content) +print(ResponseFormat(**json.loads(resp.msg.content))) From 859216be164e2f49d6c8a8dabb4e8f3a417e5bfc Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Thu, 23 Jan 2025 11:43:25 -0600 Subject: [PATCH 20/28] add response format support --- camel/agents/chat_agent.py | 103 +++++++++++++++++- camel/messages/base.py | 4 +- camel/models/base_model.py | 13 ++- camel/models/cohere_model.py | 33 +++++- examples/minimum_agents/cohere_agent.py | 43 ++++++++ .../{mistral.py => mistral_agent.py} | 8 +- .../minimum_agents/{qwen.py => qwen_agent.py} | 4 +- 7 files changed, 190 insertions(+), 18 deletions(-) create mode 100644 examples/minimum_agents/cohere_agent.py rename examples/minimum_agents/{mistral.py => mistral_agent.py} (90%) rename examples/minimum_agents/{qwen.py => qwen_agent.py} (94%) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index ebeb47f4fb..58a01187d3 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -15,6 +15,7 @@ import json import logging +import textwrap from collections import defaultdict from typing import ( TYPE_CHECKING, @@ -28,8 +29,11 @@ Union, ) -from openai import AsyncStream, Stream -from pydantic import BaseModel +from openai import ( + AsyncStream, + Stream, +) +from pydantic import BaseModel, ValidationError from camel.agents._types import ModelResponse, ToolCallRequest from camel.agents._utils import ( @@ -53,6 +57,7 @@ ModelManager, ModelProcessingError, ) +from camel.prompts import TextPrompt from camel.responses import ChatAgentResponse from camel.toolkits import FunctionTool from camel.types import ( @@ -84,6 +89,17 @@ from camel.utils import track_agent +SIMPLE_FORMAT_PROMPT = TextPrompt( + textwrap.dedent( + """\ + Please format the following content: + + {content} + """ + ) +) + + @track_agent(name="ChatAgent") class ChatAgent(BaseAgent): r"""Class for managing conversations of CAMEL Chat Agents. 
@@ -323,6 +339,71 @@ def record_message(self, message: BaseMessage) -> None: """ self.update_memory(message, OpenAIBackendRole.ASSISTANT) + def _try_format_message( + self, message: BaseMessage, response_format: Type[BaseModel] + ) -> None: + r"""Try to format the message if needed.""" + if message.parsed: + return + + try: + message.parsed = response_format.model_validate_json( + message.content + ) + except ValidationError: + logger.warning(f"Failed to parse response: {message.content}") + + def _format_response_if_needed( + self, + response: ModelResponse, + response_format: Optional[Type[BaseModel]], + ) -> None: + r"""Format the response if needed. + + This function won't format the response under the following cases: + 1. The response format is None (not provided) + 2. The response is empty + """ + if response_format is None: + return + + for message in response.output_messages: + self._try_format_message(message, response_format) + if message.parsed: + continue + + prompt = SIMPLE_FORMAT_PROMPT.format(content=message.content) + openai_message: OpenAIMessage = {"role": "user", "content": prompt} + # Explicitly set the tools to empty list to avoid calling tools + response = self._get_model_response( + [openai_message], response_format, [], 0 + ) + message.content = response.output_messages[0].content + self._try_format_message(message, response_format) + + async def _aformat_response_if_needed( + self, + response: ModelResponse, + response_format: Optional[Type[BaseModel]], + ) -> None: + r"""Format the response if needed.""" + + if response_format is None: + return + + for message in response.output_messages: + self._try_format_message(message, response_format) + if message.parsed: + continue + + prompt = SIMPLE_FORMAT_PROMPT.format(content=message.content) + openai_message: OpenAIMessage = {"role": "user", "content": prompt} + response = await self._aget_model_response( + [openai_message], response_format, [], 0 + ) + message.content = response.output_messages[0].content + self._try_format_message(message, response_format) + def step( self, input_message: Union[BaseMessage, str], @@ -365,7 +446,10 @@ def step( ) # Get response from model backend response = self._get_model_response( - openai_messages, response_format, num_tokens + openai_messages, + response_format, + self._get_full_tool_schemas(), + num_tokens, ) if self.single_iteration: @@ -379,6 +463,7 @@ def step( break + self._format_response_if_needed(response, response_format) self._record_final_output(response.output_messages) return self._convert_to_chatagent_response( @@ -433,7 +518,10 @@ async def astep( ) response = await self._aget_model_response( - openai_messages, response_format, num_tokens + openai_messages, + response_format, + self._get_full_tool_schemas(), + num_tokens, ) if self.single_iteration: @@ -446,6 +534,7 @@ async def astep( break + await self._aformat_response_if_needed(response, response_format) self._record_final_output(response.output_messages) return self._convert_to_chatagent_response( @@ -488,6 +577,7 @@ def _get_model_response( self, openai_messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]], + tool_schemas: Optional[List[Dict[str, Any]]], num_tokens: int, ) -> ModelResponse: r"""Internal function for agent step model response.""" @@ -495,7 +585,7 @@ def _get_model_response( response = None try: response = self.model_backend.run( - openai_messages, response_format, self._get_full_tool_schemas() + openai_messages, response_format, tool_schemas or None ) except Exception as 
exc: logger.error( @@ -525,6 +615,7 @@ async def _aget_model_response( self, openai_messages: List[OpenAIMessage], response_format: Optional[Type[BaseModel]], + tool_schemas: Optional[List[Dict[str, Any]]], num_tokens: int, ) -> ModelResponse: r"""Internal function for agent step model response.""" @@ -532,7 +623,7 @@ async def _aget_model_response( response = None try: response = await self.model_backend.arun( - openai_messages, response_format, self._get_full_tool_schemas() + openai_messages, response_format, tool_schemas or None ) except Exception as exc: logger.error( diff --git a/camel/messages/base.py b/camel/messages/base.py index 2dc7540126..70bce94153 100644 --- a/camel/messages/base.py +++ b/camel/messages/base.py @@ -15,7 +15,7 @@ import io import re from dataclasses import dataclass -from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union +from typing import Any, Dict, List, Literal, Optional, Tuple, Union import numpy as np from PIL import Image @@ -72,7 +72,7 @@ class BaseMessage: image_list: Optional[List[Image.Image]] = None image_detail: Literal["auto", "low", "high"] = "auto" video_detail: Literal["auto", "low", "high"] = "low" - parsed: Optional[Union[Type[BaseModel], dict]] = None + parsed: Optional[Union[BaseModel, dict]] = None @classmethod def make_user_message( diff --git a/camel/models/base_model.py b/camel/models/base_model.py index 44dca8de6c..f8a38b5532 100644 --- a/camel/models/base_model.py +++ b/camel/models/base_model.py @@ -117,7 +117,12 @@ def run( `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. """ - tools = tools or self.model_config_dict.get("tools", None) + # None -> use default tools + if tools is None: + tools = self.model_config_dict.get("tools", None) + # Empty -> use no tools + elif not tools: + tools = None return self._run(messages, response_format, tools) async def arun( @@ -143,8 +148,10 @@ async def arun( `ChatCompletion` in the non-stream mode, or `AsyncStream[ChatCompletionChunk]` in the stream mode. 
""" - # If tools are empty, make it None - tools = tools or self.model_config_dict.get("tools", None) + if tools is None: + tools = self.model_config_dict.get("tools", None) + elif not tools: + tools = None return await self._arun(messages, response_format, tools) @abstractmethod diff --git a/camel/models/cohere_model.py b/camel/models/cohere_model.py index ad5ff79dff..4c15eec884 100644 --- a/camel/models/cohere_model.py +++ b/camel/models/cohere_model.py @@ -26,6 +26,7 @@ from camel.configs import COHERE_API_PARAMS, CohereConfig from camel.messages import OpenAIMessage from camel.models import BaseModelBackend +from camel.models._utils import try_modify_message_with_format from camel.types import ChatCompletion, ModelType from camel.utils import ( BaseTokenCounter, @@ -218,6 +219,26 @@ def token_counter(self) -> BaseTokenCounter: ) return self._token_counter + def _prepare_request( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], + ) -> Dict[str, Any]: + request_config = self.model_config_dict.copy() + try_modify_message_with_format(messages[-1], response_format) + if tools: + for tool in tools: + function_dict = tool.get('function', {}) + function_dict.pop("strict", None) + request_config["tools"] = tools + elif response_format: + # Improve stability with native response format support + # This config will be ignored if used with tools + request_config["response_format"] = {"type": "json_object"} + + return request_config + def _run( self, messages: List[OpenAIMessage], @@ -234,13 +255,17 @@ def _run( """ from cohere.core.api_error import ApiError + request_config = self._prepare_request( + messages, response_format, tools + ) + cohere_messages = self._to_cohere_chatmessage(messages) try: response = self._client.chat( messages=cohere_messages, model=self.model_type, - **self.model_config_dict, + **request_config, ) except ApiError as e: logging.error(f"Cohere API Error: {e.status_code}") @@ -284,13 +309,17 @@ async def _arun( """ from cohere.core.api_error import ApiError + request_config = self._prepare_request( + messages, response_format, tools + ) + cohere_messages = self._to_cohere_chatmessage(messages) try: response = await self._async_client.chat( messages=cohere_messages, model=self.model_type, - **self.model_config_dict, + **request_config, ) except ApiError as e: logging.error(f"Cohere API Error: {e.status_code}") diff --git a/examples/minimum_agents/cohere_agent.py b/examples/minimum_agents/cohere_agent.py new file mode 100644 index 0000000000..70337985cc --- /dev/null +++ b/examples/minimum_agents/cohere_agent.py @@ -0,0 +1,43 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= + +from pydantic import BaseModel + +from camel.agents import ChatAgent +from camel.models import ModelFactory +from camel.toolkits import WeatherToolkit +from camel.types import ModelPlatformType, ModelType + +model = ModelFactory.create( + model_platform=ModelPlatformType.COHERE, + model_type=ModelType.COHERE_COMMAND_R, + api_key="eb5NWNKWQaVbOan3NHa4bmL93K1x4Oy9sN16oulz", +) + + +class ResponseFormat(BaseModel): + celsius: str + fahrenheit: str + + +agent = ChatAgent(model=model, tools=[WeatherToolkit().get_weather_data]) + +resp = agent.step( + "What's the temperature in Beijing?", + response_format=ResponseFormat, +) + + +print(resp.msg.content) +print(resp.msg.parsed) diff --git a/examples/minimum_agents/mistral.py b/examples/minimum_agents/mistral_agent.py similarity index 90% rename from examples/minimum_agents/mistral.py rename to examples/minimum_agents/mistral_agent.py index 787193eaf2..4515eb61e9 100644 --- a/examples/minimum_agents/mistral.py +++ b/examples/minimum_agents/mistral_agent.py @@ -30,7 +30,11 @@ class ResponseFormat(BaseModel): fahrenheit: str -agent = ChatAgent(model=model, tools=[WeatherToolkit().get_weather_data]) +agent = ChatAgent( + "You are a helpful assistant.", + model=model, + tools=[WeatherToolkit().get_weather_data], +) resp = agent.step( "What's the temperature in New York?", @@ -39,6 +43,6 @@ class ResponseFormat(BaseModel): print(resp.msg.content) - +print(resp.msg.parsed) for message in agent.chat_history: print(f"{message['role']}: {message['content']}") diff --git a/examples/minimum_agents/qwen.py b/examples/minimum_agents/qwen_agent.py similarity index 94% rename from examples/minimum_agents/qwen.py rename to examples/minimum_agents/qwen_agent.py index 9a1531905c..552ff7dcfd 100644 --- a/examples/minimum_agents/qwen.py +++ b/examples/minimum_agents/qwen_agent.py @@ -11,7 +11,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= -import json from pydantic import BaseModel @@ -38,5 +37,4 @@ class ResponseFormat(BaseModel): response_format=ResponseFormat, ) - -print(ResponseFormat(**json.loads(resp.msg.content))) +print(resp.msg.parsed) From 72ff026bed5ca976778166cabdf293e8bff0c2b6 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Thu, 23 Jan 2025 17:34:35 -0600 Subject: [PATCH 21/28] add response format support --- camel/agents/chat_agent.py | 22 +++-- camel/configs/deepseek_config.py | 34 +------ camel/configs/gemini_config.py | 34 +------ camel/models/cohere_model.py | 4 +- camel/models/deepseek_model.py | 29 +++++- camel/models/gemini_model.py | 108 +++++++++++++++++++--- camel/models/mistral_model.py | 5 +- camel/models/qwen_model.py | 4 +- examples/minimum_agents/cohere_agent.py | 15 +-- examples/minimum_agents/deepseek_agent.py | 42 +++++++++ examples/minimum_agents/gemini_agent.py | 44 +++++++++ examples/minimum_agents/mistral_agent.py | 10 +- examples/minimum_agents/openai_agent.py | 36 ++++++++ examples/minimum_agents/qwen_agent.py | 12 ++- 14 files changed, 287 insertions(+), 112 deletions(-) create mode 100644 examples/minimum_agents/deepseek_agent.py create mode 100644 examples/minimum_agents/gemini_agent.py create mode 100644 examples/minimum_agents/openai_agent.py diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 58a01187d3..df926ac175 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -341,17 +341,23 @@ def record_message(self, message: BaseMessage) -> None: def _try_format_message( self, message: BaseMessage, response_format: Type[BaseModel] - ) -> None: - r"""Try to format the message if needed.""" + ) -> bool: + r"""Try to format the message if needed. + + Returns: + bool: Whether the message is formatted successfully (or no format + is needed). 
+ """ if message.parsed: - return + return True try: message.parsed = response_format.model_validate_json( message.content ) + return True except ValidationError: - logger.warning(f"Failed to parse response: {message.content}") + return False def _format_response_if_needed( self, @@ -368,8 +374,7 @@ def _format_response_if_needed( return for message in response.output_messages: - self._try_format_message(message, response_format) - if message.parsed: + if self._try_format_message(message, response_format): continue prompt = SIMPLE_FORMAT_PROMPT.format(content=message.content) @@ -379,7 +384,8 @@ def _format_response_if_needed( [openai_message], response_format, [], 0 ) message.content = response.output_messages[0].content - self._try_format_message(message, response_format) + if not self._try_format_message(message, response_format): + logger.warning(f"Failed to parse response: {message.content}") async def _aformat_response_if_needed( self, @@ -761,7 +767,7 @@ def _handle_batch_response( output_messages=output_messages, finish_reasons=finish_reasons, usage_dict=usage, - response_id=response.id, + response_id=response.id or "", ) def _handle_stream_response( diff --git a/camel/configs/deepseek_config.py b/camel/configs/deepseek_config.py index 077653206f..d3fddd3a29 100644 --- a/camel/configs/deepseek_config.py +++ b/camel/configs/deepseek_config.py @@ -14,12 +14,11 @@ from __future__ import annotations -from typing import Any, Optional, Sequence, Type, Union +from typing import Optional, Sequence, Type, Union from pydantic import BaseModel from camel.configs.base_config import BaseConfig -from camel.types import NOT_GIVEN, NotGiven class DeepSeekConfig(BaseConfig): @@ -89,10 +88,10 @@ class DeepSeekConfig(BaseConfig): temperature: float = 0.2 # deepseek default: 1.0 top_p: float = 1.0 stream: bool = False - stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN - max_tokens: Union[int, NotGiven] = NOT_GIVEN + stop: Optional[Union[str, Sequence[str]]] = None + max_tokens: Optional[int] = None presence_penalty: float = 0.0 - response_format: Union[Type[BaseModel], dict, NotGiven] = NOT_GIVEN + response_format: Optional[Union[Type[BaseModel], dict]] = None frequency_penalty: float = 0.0 tool_choice: Optional[Union[dict[str, str], str]] = None logprobs: bool = False @@ -105,30 +104,5 @@ def __init__(self, include_usage: bool = True, **kwargs): if self.stream: self.stream_options = {"include_usage": include_usage} - def as_dict(self) -> dict[str, Any]: - r"""Convert the current configuration to a dictionary. - - This method converts the current configuration object to a dictionary - representation, which can be used for serialization or other purposes. - - Returns: - dict[str, Any]: A dictionary representation of the current - configuration. - """ - config_dict = self.model_dump() - if self.tools: - from camel.toolkits import FunctionTool - - tools_schema = [] - for tool in self.tools: - if not isinstance(tool, FunctionTool): - raise ValueError( - f"The tool {tool} should " - "be an instance of `FunctionTool`." 
- ) - tools_schema.append(tool.get_openai_tool_schema()) - config_dict["tools"] = NOT_GIVEN - return config_dict - DEEPSEEK_API_PARAMS = {param for param in DeepSeekConfig.model_fields.keys()} diff --git a/camel/configs/gemini_config.py b/camel/configs/gemini_config.py index caa6df7236..2ac5bbe2ae 100644 --- a/camel/configs/gemini_config.py +++ b/camel/configs/gemini_config.py @@ -14,12 +14,11 @@ from __future__ import annotations -from typing import Any, Optional, Sequence, Type, Union +from typing import Optional, Sequence, Type, Union from pydantic import BaseModel from camel.configs.base_config import BaseConfig -from camel.types import NOT_GIVEN, NotGiven class GeminiConfig(BaseConfig): @@ -80,35 +79,10 @@ class GeminiConfig(BaseConfig): top_p: float = 1.0 n: int = 1 stream: bool = False - stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN - max_tokens: Union[int, NotGiven] = NOT_GIVEN - response_format: Union[Type[BaseModel], dict, NotGiven] = NOT_GIVEN + stop: Optional[Union[str, Sequence[str]]] = None + max_tokens: Optional[int] = None + response_format: Optional[Union[Type[BaseModel], dict]] = None tool_choice: Optional[Union[dict[str, str], str]] = None - def as_dict(self) -> dict[str, Any]: - r"""Convert the current configuration to a dictionary. - - This method converts the current configuration object to a dictionary - representation, which can be used for serialization or other purposes. - - Returns: - dict[str, Any]: A dictionary representation of the current - configuration. - """ - config_dict = self.model_dump() - if self.tools: - from camel.toolkits import FunctionTool - - tools_schema = [] - for tool in self.tools: - if not isinstance(tool, FunctionTool): - raise ValueError( - f"The tool {tool} should " - "be an instance of `FunctionTool`." 
- ) - tools_schema.append(tool.get_openai_tool_schema()) - config_dict["tools"] = NOT_GIVEN - return config_dict - Gemini_API_PARAMS = {param for param in GeminiConfig.model_fields.keys()} diff --git a/camel/models/cohere_model.py b/camel/models/cohere_model.py index 4c15eec884..89dde545f8 100644 --- a/camel/models/cohere_model.py +++ b/camel/models/cohere_model.py @@ -226,15 +226,13 @@ def _prepare_request( tools: Optional[List[Dict[str, Any]]], ) -> Dict[str, Any]: request_config = self.model_config_dict.copy() - try_modify_message_with_format(messages[-1], response_format) if tools: for tool in tools: function_dict = tool.get('function', {}) function_dict.pop("strict", None) request_config["tools"] = tools elif response_format: - # Improve stability with native response format support - # This config will be ignored if used with tools + try_modify_message_with_format(messages[-1], response_format) request_config["response_format"] = {"type": "json_object"} return request_config diff --git a/camel/models/deepseek_model.py b/camel/models/deepseek_model.py index 0670d175a1..28c7f20561 100644 --- a/camel/models/deepseek_model.py +++ b/camel/models/deepseek_model.py @@ -20,6 +20,7 @@ from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig from camel.messages import OpenAIMessage +from camel.models._utils import try_modify_message_with_format from camel.models.base_model import BaseModelBackend from camel.types import ( ChatCompletion, @@ -103,6 +104,24 @@ def token_counter(self) -> BaseTokenCounter: ) return self._token_counter + def _prepare_request( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], + ) -> Dict[str, Any]: + request_config = self.model_config_dict.copy() + if tools: + for tool in tools: + function_dict = tool.get('function', {}) + function_dict.pop("strict", None) + request_config["tools"] = tools + elif response_format: + try_modify_message_with_format(messages[-1], response_format) + request_config["response_format"] = {"type": "json_object"} + + return request_config + def _run( self, messages: List[OpenAIMessage], @@ -120,10 +139,13 @@ def _run( `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. """ + request_config = self._prepare_request( + messages, response_format, tools + ) response = self._client.chat.completions.create( messages=messages, model=self.model_type, - **self.model_config_dict, + **request_config, ) return response @@ -144,10 +166,13 @@ async def _arun( `ChatCompletion` in the non-stream mode, or `AsyncStream[ChatCompletionChunk]` in the stream mode. """ + request_config = self._prepare_request( + messages, response_format, tools + ) response = await self._async_client.chat.completions.create( messages=messages, model=self.model_type, - **self.model_config_dict, + **request_config, ) return response diff --git a/camel/models/gemini_model.py b/camel/models/gemini_model.py index 50b3ea3aef..e117239344 100644 --- a/camel/models/gemini_model.py +++ b/camel/models/gemini_model.py @@ -92,50 +92,130 @@ def __init__( def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[Dict[str, Any]]] = None, + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: - r"""Runs inference of Gemini chat completion. + r"""Runs inference of OpenAI chat completion. 
Args: messages (List[OpenAIMessage]): Message list with the chat history in OpenAI API format. + response_format (Optional[Type[BaseModel]]): The format of the + response. + tools (Optional[List[Dict[str, Any]]]): The schema of the tools to + use for the request. Returns: Union[ChatCompletion, Stream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. """ - response = self._client.chat.completions.create( - messages=messages, - model=self.model_type, - **self.model_config_dict, + response_format = response_format or self.model_config_dict.get( + "response_format", None ) - return response + if response_format: + return self._request_parse(messages, response_format) + else: + return self._request_chat_completion(messages, tools) async def _arun( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]] = None, - tools: Optional[List[Dict[str, Any]]] = None, + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: - r"""Runs inference of Gemini chat completion asynchronously. + r"""Runs inference of OpenAI chat completion in async mode. Args: messages (List[OpenAIMessage]): Message list with the chat history in OpenAI API format. + response_format (Optional[Type[BaseModel]]): The format of the + response. + tools (Optional[List[Dict[str, Any]]]): The schema of the tools to + use for the request. Returns: Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: `ChatCompletion` in the non-stream mode, or `AsyncStream[ChatCompletionChunk]` in the stream mode. """ - response = await self._async_client.chat.completions.create( + response_format = response_format or self.model_config_dict.get( + "response_format", None + ) + if response_format: + return await self._arequest_parse(messages, response_format) + else: + return await self._arequest_chat_completion(messages, tools) + + def _request_chat_completion( + self, + messages: List[OpenAIMessage], + tools: Optional[List[Dict[str, Any]]], + ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: + request_config = self.model_config_dict.copy() + + if tools: + for tool in tools: + function_dict = tool.get('function', {}) + function_dict.pop("strict", None) + request_config["tools"] = tools + + return self._client.chat.completions.create( + messages=messages, + model=self.model_type, + **request_config, + ) + + async def _arequest_chat_completion( + self, + messages: List[OpenAIMessage], + tools: Optional[List[Dict[str, Any]]], + ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: + request_config = self.model_config_dict.copy() + + if tools: + for tool in tools: + function_dict = tool.get('function', {}) + function_dict.pop("strict", None) + request_config["tools"] = tools + + return await self._async_client.chat.completions.create( + messages=messages, + model=self.model_type, + **request_config, + ) + + def _request_parse( + self, + messages: List[OpenAIMessage], + response_format: Type[BaseModel], + ) -> ChatCompletion: + request_config = self.model_config_dict.copy() + + request_config["response_format"] = response_format + request_config.pop("stream", None) + + return self._client.beta.chat.completions.parse( + messages=messages, + model=self.model_type, + **request_config, + ) + + async def _arequest_parse( + self, + messages: List[OpenAIMessage], + response_format: Type[BaseModel], + ) -> ChatCompletion: + request_config = 
self.model_config_dict.copy() + + request_config["response_format"] = response_format + request_config.pop("stream", None) + + return await self._async_client.beta.chat.completions.parse( messages=messages, model=self.model_type, - **self.model_config_dict, + **request_config, ) - return response @property def token_counter(self) -> BaseTokenCounter: diff --git a/camel/models/mistral_model.py b/camel/models/mistral_model.py index 3fb52e6fb1..899a38adba 100644 --- a/camel/models/mistral_model.py +++ b/camel/models/mistral_model.py @@ -265,14 +265,11 @@ def _prepare_request( response_format: Optional[Type[BaseModel]], tools: Optional[List[Dict[str, Any]]], ) -> Dict[str, Any]: - # TODO: very unstable if tools and response_format are both used request_config = self.model_config_dict.copy() - try_modify_message_with_format(messages[-1], response_format) if tools: request_config["tools"] = tools elif response_format: - # Improve stability with native response format support - # This config will not be allowed if used with tools + try_modify_message_with_format(messages[-1], response_format) request_config["response_format"] = {"type": "json_object"} return request_config diff --git a/camel/models/qwen_model.py b/camel/models/qwen_model.py index 2dbdcebf6c..9cfaf4f5c4 100644 --- a/camel/models/qwen_model.py +++ b/camel/models/qwen_model.py @@ -149,12 +149,10 @@ def _prepare_request( tools: Optional[List[Dict[str, Any]]], ) -> Dict[str, Any]: request_config = self.model_config_dict.copy() - try_modify_message_with_format(messages[-1], response_format) if tools: request_config["tools"] = tools elif response_format: - # Improve stability with native response format support - # This config will be unstable if being used with tools + try_modify_message_with_format(messages[-1], response_format) request_config["response_format"] = {"type": "json_object"} return request_config diff --git a/examples/minimum_agents/cohere_agent.py b/examples/minimum_agents/cohere_agent.py index 70337985cc..3e59726206 100644 --- a/examples/minimum_agents/cohere_agent.py +++ b/examples/minimum_agents/cohere_agent.py @@ -22,22 +22,23 @@ model = ModelFactory.create( model_platform=ModelPlatformType.COHERE, model_type=ModelType.COHERE_COMMAND_R, - api_key="eb5NWNKWQaVbOan3NHa4bmL93K1x4Oy9sN16oulz", ) class ResponseFormat(BaseModel): - celsius: str - fahrenheit: str + max_temp: str + min_temp: str -agent = ChatAgent(model=model, tools=[WeatherToolkit().get_weather_data]) +agent = ChatAgent( + "You are a helpful assistant.", + model=model, + tools=[WeatherToolkit().get_weather_data], +) resp = agent.step( - "What's the temperature in Beijing?", + "What's the temperature in New York today?", response_format=ResponseFormat, ) - -print(resp.msg.content) print(resp.msg.parsed) diff --git a/examples/minimum_agents/deepseek_agent.py b/examples/minimum_agents/deepseek_agent.py new file mode 100644 index 0000000000..a6446b9fa0 --- /dev/null +++ b/examples/minimum_agents/deepseek_agent.py @@ -0,0 +1,42 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= + +from pydantic import BaseModel + +from camel.agents import ChatAgent +from camel.models import ModelFactory +from camel.types import ModelPlatformType, ModelType + +model = ModelFactory.create( + model_platform=ModelPlatformType.DEEPSEEK, + model_type=ModelType.DEEPSEEK_CHAT, +) + + +class ResponseFormat(BaseModel): + content: str + fun_level: int + + +agent = ChatAgent( + "You are a helpful assistant.", + model=model, +) + +resp = agent.step( + "Tell me a joke.", + response_format=ResponseFormat, +) + +print(resp.msg.parsed) diff --git a/examples/minimum_agents/gemini_agent.py b/examples/minimum_agents/gemini_agent.py new file mode 100644 index 0000000000..59b9d25ec7 --- /dev/null +++ b/examples/minimum_agents/gemini_agent.py @@ -0,0 +1,44 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= + +from pydantic import BaseModel + +from camel.agents import ChatAgent +from camel.models import ModelFactory +from camel.toolkits import WeatherToolkit +from camel.types import ModelPlatformType, ModelType + +model = ModelFactory.create( + model_platform=ModelPlatformType.GEMINI, + model_type=ModelType.GEMINI_1_5_FLASH, +) + + +class ResponseFormat(BaseModel): + max_temp: str + min_temp: str + + +agent = ChatAgent( + "You are a helpful assistant.", + model=model, + tools=[WeatherToolkit().get_weather_data], +) + +resp = agent.step( + "What's the temperature in New York today?", + response_format=ResponseFormat, +) + +print(resp.msg.parsed) diff --git a/examples/minimum_agents/mistral_agent.py b/examples/minimum_agents/mistral_agent.py index 4515eb61e9..e6cbe48e3a 100644 --- a/examples/minimum_agents/mistral_agent.py +++ b/examples/minimum_agents/mistral_agent.py @@ -26,8 +26,8 @@ class ResponseFormat(BaseModel): - celsius: str - fahrenheit: str + max_temp: str + min_temp: str agent = ChatAgent( @@ -37,12 +37,8 @@ class ResponseFormat(BaseModel): ) resp = agent.step( - "What's the temperature in New York?", + "What's the temperature in New York today?", response_format=ResponseFormat, ) - -print(resp.msg.content) print(resp.msg.parsed) -for message in agent.chat_history: - print(f"{message['role']}: {message['content']}") diff --git a/examples/minimum_agents/openai_agent.py b/examples/minimum_agents/openai_agent.py new file mode 100644 index 0000000000..7acc92993b --- /dev/null +++ b/examples/minimum_agents/openai_agent.py @@ -0,0 +1,36 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= + +from pydantic import BaseModel + +from camel.agents import ChatAgent +from camel.toolkits import WeatherToolkit + + +class ResponseFormat(BaseModel): + max_temp: str + min_temp: str + + +agent = ChatAgent( + "You are a helpful assistant.", + tools=[WeatherToolkit().get_weather_data], +) + +resp = agent.step( + "What's the temperature in New York today?", + response_format=ResponseFormat, +) + +print(resp.msg.parsed) diff --git a/examples/minimum_agents/qwen_agent.py b/examples/minimum_agents/qwen_agent.py index 552ff7dcfd..81c1f0627c 100644 --- a/examples/minimum_agents/qwen_agent.py +++ b/examples/minimum_agents/qwen_agent.py @@ -26,14 +26,18 @@ class ResponseFormat(BaseModel): - celsius: str - fahrenheit: str + max_temp: str + min_temp: str -agent = ChatAgent(model=model, tools=[WeatherToolkit().get_weather_data]) +agent = ChatAgent( + "You are a helpful assistant.", + model=model, + tools=[WeatherToolkit().get_weather_data], +) resp = agent.step( - "What's the temperature in Beijing?", + "What's the temperature in New York today?", response_format=ResponseFormat, ) From 03e494b4166400a01f3c86107e997a0d87c99662 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Tue, 4 Feb 2025 12:34:00 -0600 Subject: [PATCH 22/28] Update camel/models/gemini_model.py Co-authored-by: Xiaotian Jin --- camel/models/gemini_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/camel/models/gemini_model.py b/camel/models/gemini_model.py index 18eafc23a6..bb0ccfc832 100644 --- a/camel/models/gemini_model.py +++ b/camel/models/gemini_model.py @@ -91,7 +91,7 @@ def __init__( def _process_messages(self, messages) -> List[OpenAIMessage]: r"""Process the messages for Gemini API to ensure no empty content, - which is not accepeted by Gemini. + which is not accepted by Gemini. """ processed_messages = [] for msg in messages: From 5667bb4655fbb83e06bec02f05ff726ec368584e Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Tue, 4 Feb 2025 12:45:52 -0600 Subject: [PATCH 23/28] update fixes --- .gitignore | 2 ++ camel/agents/chat_agent.py | 6 ++++++ camel/models/gemini_model.py | 2 +- poetry.lock | 4 ---- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index f3e8aa18f9..7159aad4eb 100644 --- a/.gitignore +++ b/.gitignore @@ -431,3 +431,5 @@ benchmark/gaia/results.jsonl # Secret files for docker .container/.env + +examples/datagen/star/outputs/ diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 7cacb7878d..deeeddd65e 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -125,6 +125,12 @@ class ChatAgent(BaseAgent): tools (Optional[List[Union[FunctionTool, Callable]]], optional): List of available :obj:`FunctionTool` or :obj:`Callable`. (default: :obj:`None`) + external_tools (Optional[List[Union[FunctionTool, Callable, + Dict[str, Any]]]], optional): List of external tools + (:obj:`FunctionTool` or :obj:`Callable` or :obj:`Dict[str, Any]`) + bind to one chat agent. 
When these tools are called, the agent will + directly return the request instead of processing it. + (default: :obj:`None`) response_terminators (List[ResponseTerminator], optional): List of :obj:`ResponseTerminator` bind to one chat agent. (default: :obj:`None`) diff --git a/camel/models/gemini_model.py b/camel/models/gemini_model.py index bb0ccfc832..dd59195078 100644 --- a/camel/models/gemini_model.py +++ b/camel/models/gemini_model.py @@ -107,7 +107,7 @@ def _run( response_format: Optional[Type[BaseModel]], tools: Optional[List[Dict[str, Any]]], ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: - r"""Runs inference of OpenAI chat completion. + r"""Runs inference of Gemini chat completion. Args: messages (List[OpenAIMessage]): Message list with the chat history diff --git a/poetry.lock b/poetry.lock index 543da91eae..d772211ebe 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,8 +1,4 @@ -<<<<<<< HEAD # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. -======= -# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. ->>>>>>> master [[package]] name = "accelerate" From 03fc264c11f558df1d49d77729429c9e3b816be0 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Tue, 11 Feb 2025 10:38:23 -0600 Subject: [PATCH 24/28] add aiml tool calling support --- camel/models/aiml_model.py | 41 +++++++++++++++++++------ examples/minimum_agents/aiml_agent.py | 43 +++++++++++++++++++++++++++ 2 files changed, 75 insertions(+), 9 deletions(-) create mode 100644 examples/minimum_agents/aiml_agent.py diff --git a/camel/models/aiml_model.py b/camel/models/aiml_model.py index 47e7fcb6ae..26aec27f3f 100644 --- a/camel/models/aiml_model.py +++ b/camel/models/aiml_model.py @@ -14,11 +14,12 @@ import os from typing import Any, Dict, List, Optional, Type, Union -from openai import AsyncStream, OpenAI, Stream +from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream from pydantic import BaseModel from camel.configs import AIML_API_PARAMS, AIMLConfig from camel.messages import OpenAIMessage +from camel.models._utils import try_modify_message_with_format from camel.models.base_model import BaseModelBackend from camel.types import ( ChatCompletion, @@ -82,6 +83,25 @@ def __init__( api_key=self._api_key, base_url=self._url, ) + self._async_client = AsyncOpenAI( + timeout=180, + max_retries=3, + api_key=self._api_key, + base_url=self._url, + ) + + def _prepare_request( + self, + messages: List[OpenAIMessage], + response_format: Optional[Type[BaseModel]], + tools: Optional[List[Dict[str, Any]]], + ) -> Dict[str, Any]: + request_config = self.model_config_dict.copy() + if tools: + request_config['tools'] = tools + if response_format: + try_modify_message_with_format(messages[-1], response_format) + return request_config def _run( self, @@ -100,15 +120,12 @@ def _run( `ChatCompletion` in the non-stream mode, or `Stream[ChatCompletionChunk]` in the stream mode. 
""" - # Process model configuration parameters - model_config = self.model_config_dict.copy() - - # Handle special case for tools parameter - if model_config.get('tools') is None: - model_config['tools'] = [] + request_config = self._prepare_request( + messages, response_format, tools + ) response = self._client.chat.completions.create( - messages=messages, model=self.model_type, **model_config + messages=messages, model=self.model_type, **request_config ) return response @@ -118,7 +135,13 @@ async def _arun( response_format: Optional[Type[BaseModel]], tools: Optional[List[Dict[str, Any]]], ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: - raise NotImplementedError("AIML does not support async inference.") + request_config = self._prepare_request( + messages, response_format, tools + ) + response = await self._async_client.chat.completions.create( + messages=messages, model=self.model_type, **request_config + ) + return response @property def token_counter(self) -> BaseTokenCounter: diff --git a/examples/minimum_agents/aiml_agent.py b/examples/minimum_agents/aiml_agent.py new file mode 100644 index 0000000000..da5d2ef13f --- /dev/null +++ b/examples/minimum_agents/aiml_agent.py @@ -0,0 +1,43 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= + +from pydantic import BaseModel + +from camel.agents import ChatAgent +from camel.models import ModelFactory +from camel.types import ModelPlatformType, ModelType + +model = ModelFactory.create( + model_platform=ModelPlatformType.AIML, + model_type=ModelType.AIML_MISTRAL_7B_INSTRUCT, +) + + +class ResponseFormat(BaseModel): + max_temp: str + min_temp: str + + +agent = ChatAgent( + "You are a helpful assistant.", + model=model, + # tools=[WeatherToolkit().get_weather_data], +) + +resp = agent.step( + "At what tempreture does the water boil?", + response_format=ResponseFormat, +) + +print(resp.msg.parsed) From d001a10fbe4c93c52523d3a6edcb12a09f8203cb Mon Sep 17 00:00:00 2001 From: Wendong-Fan <133094783+Wendong-Fan@users.noreply.github.com> Date: Fri, 14 Feb 2025 13:16:31 +0800 Subject: [PATCH 25/28] fix: Issues based on review comment for ChatAgent refactor (#1602) --- camel/agents/chat_agent.py | 32 +++++++++++-------- camel/models/_utils.py | 2 +- camel/models/aiml_model.py | 12 +++---- camel/models/base_model.py | 8 ++--- camel/models/cohere_model.py | 4 +-- camel/models/deepseek_model.py | 4 +-- camel/models/gemini_model.py | 12 +++---- camel/models/internlm_model.py | 8 ++--- camel/models/mistral_model.py | 12 +++---- camel/models/moonshot_model.py | 8 ++--- camel/models/openai_model.py | 16 +++++----- camel/models/qwen_model.py | 8 ++--- camel/models/siliconflow_model.py | 8 ++--- .../toolkits/role_playing_with_functions.py | 16 +++------- test/models/test_model_factory.py | 2 +- 15 files changed, 75 insertions(+), 77 deletions(-) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index fb01e95f31..f7f18323bb 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -211,7 +211,7 @@ def __init__( } self._external_tool_schemas = { - tool_schema["name"]: tool_schema + tool_schema["function"]["name"]: tool_schema for tool_schema in [ convert_to_schema(tool) for tool in (external_tools or []) ] @@ -368,7 +368,7 @@ def _try_format_message( def _format_response_if_needed( self, response: ModelResponse, - response_format: Optional[Type[BaseModel]], + response_format: Optional[Type[BaseModel]] = None, ) -> None: r"""Format the response if needed. 
@@ -387,7 +387,7 @@ def _format_response_if_needed( openai_message: OpenAIMessage = {"role": "user", "content": prompt} # Explicitly set the tools to empty list to avoid calling tools response = self._get_model_response( - [openai_message], response_format, [], 0 + [openai_message], 0, response_format, [] ) message.content = response.output_messages[0].content if not self._try_format_message(message, response_format): @@ -396,7 +396,7 @@ def _format_response_if_needed( async def _aformat_response_if_needed( self, response: ModelResponse, - response_format: Optional[Type[BaseModel]], + response_format: Optional[Type[BaseModel]] = None, ) -> None: r"""Format the response if needed.""" @@ -411,7 +411,7 @@ async def _aformat_response_if_needed( prompt = SIMPLE_FORMAT_PROMPT.format(content=message.content) openai_message: OpenAIMessage = {"role": "user", "content": prompt} response = await self._aget_model_response( - [openai_message], response_format, [], 0 + [openai_message], 0, response_format, [] ) message.content = response.output_messages[0].content self._try_format_message(message, response_format) @@ -459,9 +459,9 @@ def step( # Get response from model backend response = self._get_model_response( openai_messages, + num_tokens, response_format, self._get_full_tool_schemas(), - num_tokens, ) if self.single_iteration: @@ -470,8 +470,14 @@ def step( # TODO: return with external tools # If tool call requested, execute it and enter the next iteration if tool_call_request := response.tool_call_request: - tool_call_records.append(self._execute_tool(tool_call_request)) - continue + if ( + tool_call_request.func_name + not in self._external_tool_schemas + ): + tool_call_records.append( + self._execute_tool(tool_call_request) + ) + continue break @@ -531,9 +537,9 @@ async def astep( response = await self._aget_model_response( openai_messages, + num_tokens, response_format, self._get_full_tool_schemas(), - num_tokens, ) if self.single_iteration: @@ -588,9 +594,9 @@ def _record_final_output(self, output_messages: List[BaseMessage]) -> None: def _get_model_response( self, openai_messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tool_schemas: Optional[List[Dict[str, Any]]], num_tokens: int, + response_format: Optional[Type[BaseModel]] = None, + tool_schemas: Optional[List[Dict[str, Any]]] = None, ) -> ModelResponse: r"""Internal function for agent step model response.""" @@ -626,9 +632,9 @@ def _get_model_response( async def _aget_model_response( self, openai_messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tool_schemas: Optional[List[Dict[str, Any]]], num_tokens: int, + response_format: Optional[Type[BaseModel]] = None, + tool_schemas: Optional[List[Dict[str, Any]]] = None, ) -> ModelResponse: r"""Internal function for agent step model response.""" diff --git a/camel/models/_utils.py b/camel/models/_utils.py index e8b32618bd..d727b6b88d 100644 --- a/camel/models/_utils.py +++ b/camel/models/_utils.py @@ -21,7 +21,7 @@ def try_modify_message_with_format( message: OpenAIMessage, - response_format: Optional[Type[BaseModel]], + response_format: Optional[Type[BaseModel]] = None, ) -> None: r"""Modifies the content of the message to include the instruction of using the response format. 
diff --git a/camel/models/aiml_model.py b/camel/models/aiml_model.py index 26aec27f3f..64c6c9a424 100644 --- a/camel/models/aiml_model.py +++ b/camel/models/aiml_model.py @@ -93,8 +93,8 @@ def __init__( def _prepare_request( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Dict[str, Any]: request_config = self.model_config_dict.copy() if tools: @@ -106,8 +106,8 @@ def _prepare_request( def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. @@ -132,8 +132,8 @@ def _run( async def _arun( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: request_config = self._prepare_request( messages, response_format, tools diff --git a/camel/models/base_model.py b/camel/models/base_model.py index f6a04b96bd..b8bafe6dcc 100644 --- a/camel/models/base_model.py +++ b/camel/models/base_model.py @@ -78,8 +78,8 @@ def token_counter(self) -> BaseTokenCounter: def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: pass @@ -87,8 +87,8 @@ def _run( async def _arun( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: pass diff --git a/camel/models/cohere_model.py b/camel/models/cohere_model.py index 89dde545f8..6d57a617b5 100644 --- a/camel/models/cohere_model.py +++ b/camel/models/cohere_model.py @@ -222,8 +222,8 @@ def token_counter(self) -> BaseTokenCounter: def _prepare_request( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Dict[str, Any]: request_config = self.model_config_dict.copy() if tools: diff --git a/camel/models/deepseek_model.py b/camel/models/deepseek_model.py index 942531e3d3..7591279f20 100644 --- a/camel/models/deepseek_model.py +++ b/camel/models/deepseek_model.py @@ -120,8 +120,8 @@ def token_counter(self) -> BaseTokenCounter: def _prepare_request( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Dict[str, Any]: request_config = self.model_config_dict.copy() diff --git a/camel/models/gemini_model.py b/camel/models/gemini_model.py index dd59195078..ab7428383c 100644 --- a/camel/models/gemini_model.py +++ b/camel/models/gemini_model.py @@ -104,8 +104,8 @@ def _process_messages(self, 
messages) -> List[OpenAIMessage]: def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Gemini chat completion. @@ -134,8 +134,8 @@ def _run( async def _arun( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion in async mode. @@ -164,7 +164,7 @@ async def _arun( def _request_chat_completion( self, messages: List[OpenAIMessage], - tools: Optional[List[Dict[str, Any]]], + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: request_config = self.model_config_dict.copy() @@ -183,7 +183,7 @@ def _request_chat_completion( async def _arequest_chat_completion( self, messages: List[OpenAIMessage], - tools: Optional[List[Dict[str, Any]]], + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: request_config = self.model_config_dict.copy() diff --git a/camel/models/internlm_model.py b/camel/models/internlm_model.py index e20d015ffc..92b9c0c06f 100644 --- a/camel/models/internlm_model.py +++ b/camel/models/internlm_model.py @@ -86,8 +86,8 @@ def __init__( def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of InternLM chat completion. @@ -110,8 +110,8 @@ def _run( async def _arun( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: raise NotImplementedError("InternLM does not support async inference.") diff --git a/camel/models/mistral_model.py b/camel/models/mistral_model.py index 55b60059e5..a878263a61 100644 --- a/camel/models/mistral_model.py +++ b/camel/models/mistral_model.py @@ -220,16 +220,16 @@ def token_counter(self) -> BaseTokenCounter: async def _arun( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: raise NotImplementedError("Mistral does not support async inference.") def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> ChatCompletion: r"""Runs inference of Mistral chat completion. 
@@ -276,8 +276,8 @@ def _run( def _prepare_request( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Dict[str, Any]: request_config = self.model_config_dict.copy() if tools: diff --git a/camel/models/moonshot_model.py b/camel/models/moonshot_model.py index cd602e7b85..b53d2994af 100644 --- a/camel/models/moonshot_model.py +++ b/camel/models/moonshot_model.py @@ -82,8 +82,8 @@ def __init__( def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Moonshot chat completion. @@ -107,8 +107,8 @@ def _run( async def _arun( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: raise NotImplementedError("Moonshot does not support async inference.") diff --git a/camel/models/openai_model.py b/camel/models/openai_model.py index d356cdcf99..2e7950f617 100644 --- a/camel/models/openai_model.py +++ b/camel/models/openai_model.py @@ -134,8 +134,8 @@ def token_counter(self) -> BaseTokenCounter: def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion. @@ -163,8 +163,8 @@ def _run( async def _arun( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: r"""Runs inference of OpenAI chat completion in async mode. 
@@ -192,7 +192,7 @@ async def _arun( def _request_chat_completion( self, messages: List[OpenAIMessage], - tools: Optional[List[Dict[str, Any]]], + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: request_config = self.model_config_dict.copy() @@ -213,7 +213,7 @@ def _request_chat_completion( async def _arequest_chat_completion( self, messages: List[OpenAIMessage], - tools: Optional[List[Dict[str, Any]]], + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: request_config = self.model_config_dict.copy() @@ -235,7 +235,7 @@ def _request_parse( self, messages: List[OpenAIMessage], response_format: Type[BaseModel], - tools: Optional[List[Dict[str, Any]]], + tools: Optional[List[Dict[str, Any]]] = None, ) -> ChatCompletion: request_config = self.model_config_dict.copy() @@ -256,7 +256,7 @@ async def _arequest_parse( self, messages: List[OpenAIMessage], response_format: Type[BaseModel], - tools: Optional[List[Dict[str, Any]]], + tools: Optional[List[Dict[str, Any]]] = None, ) -> ChatCompletion: request_config = self.model_config_dict.copy() diff --git a/camel/models/qwen_model.py b/camel/models/qwen_model.py index 9cfaf4f5c4..68f72836db 100644 --- a/camel/models/qwen_model.py +++ b/camel/models/qwen_model.py @@ -117,8 +117,8 @@ async def _arun( def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of Qwen chat completion. @@ -145,8 +145,8 @@ def _run( def _prepare_request( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Dict[str, Any]: request_config = self.model_config_dict.copy() if tools: diff --git a/camel/models/siliconflow_model.py b/camel/models/siliconflow_model.py index 2b8d86dedb..f1a9508009 100644 --- a/camel/models/siliconflow_model.py +++ b/camel/models/siliconflow_model.py @@ -86,8 +86,8 @@ def __init__( def _run( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]: r"""Runs inference of SiliconFlow chat completion. @@ -110,8 +110,8 @@ def _run( async def _arun( self, messages: List[OpenAIMessage], - response_format: Optional[Type[BaseModel]], - tools: Optional[List[Dict[str, Any]]], + response_format: Optional[Type[BaseModel]] = None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: raise NotImplementedError( "SiliconFlow does not support async inference." 
diff --git a/examples/toolkits/role_playing_with_functions.py b/examples/toolkits/role_playing_with_functions.py index 157ea4bf17..4950d99115 100644 --- a/examples/toolkits/role_playing_with_functions.py +++ b/examples/toolkits/role_playing_with_functions.py @@ -17,7 +17,6 @@ from colorama import Fore from camel.agents.chat_agent import ToolCallingRecord -from camel.configs import ChatGPTConfig from camel.models import ModelFactory from camel.societies import RolePlaying from camel.toolkits import ( @@ -29,8 +28,8 @@ def main( - model_platform=ModelPlatformType.DEFAULT, - model_type=ModelType.DEFAULT, + model_platform=ModelPlatformType.NVIDIA, + model_type=ModelType.NVIDIA_LLAMA3_3_70B_INSTRUCT, chat_turn_limit=10, ) -> None: task_prompt = ( @@ -38,18 +37,13 @@ def main( "estimate the current age of University of Oxford " "and then add 10 more years to this age, " "and get the current weather of the city where " - "the University is located." + "the University is located. You must use tool to solve the task." ) - user_model_config = ChatGPTConfig(temperature=0.0) - tools_list = [ *MathToolkit().get_tools(), - *SearchToolkit().get_tools(), + SearchToolkit().search_duckduckgo, ] - assistant_model_config = ChatGPTConfig( - temperature=0.0, - ) role_play_session = RolePlaying( assistant_role_name="Searcher", @@ -58,7 +52,6 @@ def main( model=ModelFactory.create( model_platform=model_platform, model_type=model_type, - model_config_dict=assistant_model_config.as_dict(), ), tools=tools_list, ), @@ -66,7 +59,6 @@ def main( model=ModelFactory.create( model_platform=model_platform, model_type=model_type, - model_config_dict=user_model_config.as_dict(), ), ), task_prompt=task_prompt, diff --git a/test/models/test_model_factory.py b/test/models/test_model_factory.py index bc0d77346b..3e06732ed2 100644 --- a/test/models/test_model_factory.py +++ b/test/models/test_model_factory.py @@ -30,7 +30,7 @@ parametrize = pytest.mark.parametrize( 'model_platform, model_type', [ - (ModelPlatformType.OPENAI, ModelType.GPT_3_5_TURBO), + (ModelPlatformType.OPENAI, ModelType.GPT_4O), (ModelPlatformType.OPENAI, ModelType.GPT_4O_MINI), ], ) From c0e43c14a4da8737c576d4eb2c2fea56aa82703d Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Fri, 14 Feb 2025 17:50:59 -0600 Subject: [PATCH 26/28] fix according to reviews --- .gitignore | 2 - camel/agents/_types.py | 2 +- camel/agents/_utils.py | 6 ++ camel/agents/chat_agent.py | 100 ++++++++++++++-------- camel/toolkits/function_tool.py | 9 ++ camel/types/agents/tool_calling_record.py | 4 +- camel/utils/async_func.py | 4 +- examples/minimum_agents/aiml_agent.py | 43 ---------- examples/minimum_agents/cohere_agent.py | 44 ---------- examples/minimum_agents/deepseek_agent.py | 44 ---------- examples/minimum_agents/gemini_agent.py | 44 ---------- examples/minimum_agents/mistral_agent.py | 44 ---------- examples/minimum_agents/openai_agent.py | 36 -------- examples/minimum_agents/qwen_agent.py | 44 ---------- test/agents/test_chat_agent.py | 73 ++++------------ test/agents/test_role_playing.py | 4 +- test/models/test_model_factory.py | 2 +- test/models/test_model_manager.py | 8 +- 18 files changed, 111 insertions(+), 402 deletions(-) delete mode 100644 examples/minimum_agents/aiml_agent.py delete mode 100644 examples/minimum_agents/cohere_agent.py delete mode 100644 examples/minimum_agents/deepseek_agent.py delete mode 100644 examples/minimum_agents/gemini_agent.py delete mode 100644 examples/minimum_agents/mistral_agent.py delete mode 100644 examples/minimum_agents/openai_agent.py 
delete mode 100644 examples/minimum_agents/qwen_agent.py diff --git a/.gitignore b/.gitignore index 7159aad4eb..f3e8aa18f9 100644 --- a/.gitignore +++ b/.gitignore @@ -431,5 +431,3 @@ benchmark/gaia/results.jsonl # Secret files for docker .container/.env - -examples/datagen/star/outputs/ diff --git a/camel/agents/_types.py b/camel/agents/_types.py index 70b881d3ee..0ce37c6d19 100644 --- a/camel/agents/_types.py +++ b/camel/agents/_types.py @@ -23,7 +23,7 @@ class ToolCallRequest(BaseModel): r"""The request for tool calling.""" - func_name: str + tool_name: str args: Dict[str, Any] tool_call_id: str diff --git a/camel/agents/_utils.py b/camel/agents/_utils.py index 36def466fb..da4ed8f9f5 100644 --- a/camel/agents/_utils.py +++ b/camel/agents/_utils.py @@ -23,6 +23,7 @@ Function, ) +from camel.agents._types import ToolCallRequest from camel.toolkits import FunctionTool from camel.types import Choice from camel.types.agents import ToolCallingRecord @@ -183,6 +184,7 @@ def get_info_dict( termination_reasons: List[str], num_tokens: int, tool_calls: List[ToolCallingRecord], + external_tool_call_request: Optional[ToolCallRequest] = None, ) -> Dict[str, Any]: r"""Returns a dictionary containing information about the chat session. @@ -195,6 +197,9 @@ def get_info_dict( num_tokens (int): The number of tokens used in the chat session. tool_calls (List[ToolCallingRecord]): The list of function calling records, containing the information of called tools. + external_tool_call_request (Optional[ToolCallRequest]): The + request for external tool call. + Returns: Dict[str, Any]: The chat session information. @@ -205,6 +210,7 @@ def get_info_dict( "termination_reasons": termination_reasons, "num_tokens": num_tokens, "tool_calls": tool_calls, + "external_tool_call_request": external_tool_call_request, } diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index f7f18323bb..e130a86a05 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -182,7 +182,7 @@ def __init__( ) # Set up system message and initialize messages - self._system_message = ( + self._original_system_message = ( BaseMessage.make_assistant_message( role_name="Assistant", content=system_message ) @@ -190,7 +190,9 @@ def __init__( else system_message ) self._output_language = output_language - self._update_system_message_for_output_language() + self._system_message = ( + self._generate_system_message_for_output_language() + ) self.init_messages() # Set up role name and role type @@ -231,12 +233,31 @@ def reset(self): @property def system_message(self) -> Optional[BaseMessage]: + r"""Returns the system message for the agent.""" return self._system_message @property def tool_dict(self) -> Dict[str, FunctionTool]: + r"""Returns a dictionary of internal tools.""" return self._internal_tools + @property + def output_language(self) -> Optional[str]: + r"""Returns the output language for the agent.""" + return self._output_language + + @output_language.setter + def output_language(self, value: str) -> None: + r"""Set the output language for the agent. + + Note that this will clear the message history. + """ + self._output_language = value + self._system_message = ( + self._generate_system_message_for_output_language() + ) + self.init_messages() + def _get_full_tool_schemas(self) -> List[Dict[str, Any]]: r"""Returns a list of tool schemas of all tools, including internal and external tools. 
@@ -303,25 +324,30 @@ def update_memory( MemoryRecord(message=message, role_at_backend=role) ) - def _update_system_message_for_output_language(self) -> None: - r"""Updates the output language for the system message. The output - language determines the language in which the output text should be - generated. + def _generate_system_message_for_output_language( + self, + ) -> Optional[BaseMessage]: + r"""Generate a new system message with the output language prompt. + + The output language determines the language in which the output text + should be generated. + + Returns: + BaseMessage: The new system message. """ if not self._output_language: - return + return self._original_system_message language_prompt = ( "\nRegardless of the input language, " f"you must output text in {self._output_language}." ) - if self._system_message is not None: - content = self._system_message.content + language_prompt - self._system_message = self._system_message.create_new_instance( - content - ) + + if self._original_system_message is not None: + content = self._original_system_message.content + language_prompt + return self._original_system_message.create_new_instance(content) else: - self._system_message = BaseMessage.make_assistant_message( + return BaseMessage.make_assistant_message( role_name="Assistant", content=language_prompt, ) @@ -448,6 +474,7 @@ def step( self.update_memory(input_message, OpenAIBackendRole.USER) tool_call_records: List[ToolCallingRecord] = [] + external_tool_call_request: Optional[ToolCallRequest] = None while True: try: @@ -467,17 +494,13 @@ def step( if self.single_iteration: break - # TODO: return with external tools - # If tool call requested, execute it and enter the next iteration if tool_call_request := response.tool_call_request: - if ( - tool_call_request.func_name - not in self._external_tool_schemas - ): - tool_call_records.append( - self._execute_tool(tool_call_request) - ) - continue + if tool_call_request.tool_name in self._external_tool_schemas: + external_tool_call_request = tool_call_request + break + + tool_call_records.append(self._execute_tool(tool_call_request)) + continue break @@ -485,7 +508,7 @@ def step( self._record_final_output(response.output_messages) return self._convert_to_chatagent_response( - response, tool_call_records, num_tokens + response, tool_call_records, num_tokens, external_tool_call_request ) @property @@ -527,6 +550,7 @@ async def astep( self.update_memory(input_message, OpenAIBackendRole.USER) tool_call_records: List[ToolCallingRecord] = [] + external_tool_call_request: Optional[ToolCallRequest] = None while True: try: openai_messages, num_tokens = self.memory.get_context() @@ -546,6 +570,10 @@ async def astep( break if tool_call_request := response.tool_call_request: + if tool_call_request.tool_name in self._external_tool_schemas: + external_tool_call_request = tool_call_request + break + tool_call_record = await self._aexecute_tool(tool_call_request) tool_call_records.append(tool_call_record) continue @@ -556,7 +584,7 @@ async def astep( self._record_final_output(response.output_messages) return self._convert_to_chatagent_response( - response, tool_call_records, num_tokens + response, tool_call_records, num_tokens, external_tool_call_request ) def _convert_to_chatagent_response( @@ -564,6 +592,7 @@ def _convert_to_chatagent_response( response: ModelResponse, tool_call_records: List[ToolCallingRecord], num_tokens: int, + external_tool_call_request: Optional[ToolCallRequest], ) -> ChatAgentResponse: r"""Parse the final model response 
into the chat agent response.""" info = self._step_get_info( @@ -573,6 +602,7 @@ def _convert_to_chatagent_response( response.response_id, tool_call_records, num_tokens, + external_tool_call_request, ) return ChatAgentResponse( @@ -675,6 +705,7 @@ def _step_get_info( response_id: str, tool_calls: List[ToolCallingRecord], num_tokens: int, + external_tool_call_request: Optional[ToolCallRequest] = None, ) -> Dict[str, Any]: r"""Process the output of a chat step and gather information about the step. @@ -694,6 +725,8 @@ def _step_get_info( tool_calls (List[ToolCallingRecord]): Records of function calls made during this step. num_tokens (int): The number of tokens used in this step. + external_tool_call_request (Optional[ToolCallRequest]): The + request for external tool call. Returns: Dict[str, Any]: A dictionary containing information about the chat @@ -729,6 +762,7 @@ def _step_get_info( finish_reasons, num_tokens, tool_calls, + external_tool_call_request, ) def _handle_batch_response( @@ -769,11 +803,11 @@ def _handle_batch_response( tool_call_request: Optional[ToolCallRequest] = None if tool_calls := response.choices[0].message.tool_calls: - func_name = tool_calls[0].function.name + tool_name = tool_calls[0].function.name tool_call_id = tool_calls[0].id args = json.loads(tool_calls[0].function.arguments) tool_call_request = ToolCallRequest( - func_name=func_name, args=args, tool_call_id=tool_call_id + tool_name=tool_name, args=args, tool_call_id=tool_call_id ) return ModelResponse( @@ -939,7 +973,7 @@ def _execute_tool( FunctionCallingRecord: A struct for logging information about this function call. """ - func_name = tool_call_request.func_name + func_name = tool_call_request.tool_name args = tool_call_request.args tool_call_id = tool_call_request.tool_call_id tool = self._internal_tools[func_name] @@ -951,15 +985,11 @@ async def _aexecute_tool( self, tool_call_request: ToolCallRequest, ) -> ToolCallingRecord: - func_name = tool_call_request.func_name + func_name = tool_call_request.tool_name args = tool_call_request.args tool_call_id = tool_call_request.tool_call_id tool = self._internal_tools[func_name] - if tool.is_async: - result = await tool(**args) - else: - result = tool(**args) - + result = await tool.async_call(**args) return self._record_tool_calling(func_name, args, result, tool_call_id) def _record_tool_calling( @@ -996,7 +1026,7 @@ def _record_tool_calling( # Record information about this tool call tool_record = ToolCallingRecord( - func_name=func_name, + tool_name=func_name, args=args, result=result, tool_call_id=tool_call_id, diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index d7e92c4020..737d130ea1 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -398,6 +398,15 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any: f"Error: {e}" ) + async def async_call(self, *args: Any, **kwargs: Any) -> Any: + if self.synthesize_output: + result = self.synthesize_execution_output(args, kwargs) + return result + if self.is_async: + return await self.func(*args, **kwargs) + else: + return self.func(*args, **kwargs) + @property def is_async(self) -> bool: return inspect.iscoroutinefunction(self.func) diff --git a/camel/types/agents/tool_calling_record.py b/camel/types/agents/tool_calling_record.py index 14adb4897b..d3359b4de5 100644 --- a/camel/types/agents/tool_calling_record.py +++ b/camel/types/agents/tool_calling_record.py @@ -26,7 +26,7 @@ class ToolCallingRecord(BaseModel): tool_call_id (str): The ID of the tool 
call, if available. """ - func_name: str + tool_name: str args: Dict[str, Any] result: Any tool_call_id: str @@ -38,7 +38,7 @@ def __str__(self) -> str: str: Modified string to represent the tool calling. """ return ( - f"Tool Execution: {self.func_name}\n" + f"Tool Execution: {self.tool_name}\n" f"\tArgs: {self.args}\n" f"\tResult: {self.result}\n" ) diff --git a/camel/utils/async_func.py b/camel/utils/async_func.py index 2e1c612ab5..69e4d01bfa 100644 --- a/camel/utils/async_func.py +++ b/camel/utils/async_func.py @@ -33,8 +33,8 @@ def sync_funcs_to_async(funcs: list[FunctionTool]) -> list[FunctionTool]: for func in funcs: sync_func = func.func - def async_callable(*args, **kwargs): - return asyncio.to_thread(sync_func, *args, **kwargs) # noqa: B023 + async def async_callable(*args, **kwargs): + return await asyncio.to_thread(sync_func, *args, **kwargs) # noqa: B023 async_funcs.append( FunctionTool(async_callable, deepcopy(func.openai_tool_schema)) diff --git a/examples/minimum_agents/aiml_agent.py b/examples/minimum_agents/aiml_agent.py deleted file mode 100644 index da5d2ef13f..0000000000 --- a/examples/minimum_agents/aiml_agent.py +++ /dev/null @@ -1,43 +0,0 @@ -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= - -from pydantic import BaseModel - -from camel.agents import ChatAgent -from camel.models import ModelFactory -from camel.types import ModelPlatformType, ModelType - -model = ModelFactory.create( - model_platform=ModelPlatformType.AIML, - model_type=ModelType.AIML_MISTRAL_7B_INSTRUCT, -) - - -class ResponseFormat(BaseModel): - max_temp: str - min_temp: str - - -agent = ChatAgent( - "You are a helpful assistant.", - model=model, - # tools=[WeatherToolkit().get_weather_data], -) - -resp = agent.step( - "At what tempreture does the water boil?", - response_format=ResponseFormat, -) - -print(resp.msg.parsed) diff --git a/examples/minimum_agents/cohere_agent.py b/examples/minimum_agents/cohere_agent.py deleted file mode 100644 index 3e59726206..0000000000 --- a/examples/minimum_agents/cohere_agent.py +++ /dev/null @@ -1,44 +0,0 @@ -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= - -from pydantic import BaseModel - -from camel.agents import ChatAgent -from camel.models import ModelFactory -from camel.toolkits import WeatherToolkit -from camel.types import ModelPlatformType, ModelType - -model = ModelFactory.create( - model_platform=ModelPlatformType.COHERE, - model_type=ModelType.COHERE_COMMAND_R, -) - - -class ResponseFormat(BaseModel): - max_temp: str - min_temp: str - - -agent = ChatAgent( - "You are a helpful assistant.", - model=model, - tools=[WeatherToolkit().get_weather_data], -) - -resp = agent.step( - "What's the temperature in New York today?", - response_format=ResponseFormat, -) - -print(resp.msg.parsed) diff --git a/examples/minimum_agents/deepseek_agent.py b/examples/minimum_agents/deepseek_agent.py deleted file mode 100644 index 5d3ef98d75..0000000000 --- a/examples/minimum_agents/deepseek_agent.py +++ /dev/null @@ -1,44 +0,0 @@ -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= - -from pydantic import BaseModel - -from camel.agents import ChatAgent -from camel.models import ModelFactory -from camel.toolkits import WeatherToolkit -from camel.types import ModelPlatformType, ModelType - -model = ModelFactory.create( - model_platform=ModelPlatformType.DEEPSEEK, - model_type=ModelType.DEEPSEEK_CHAT, -) - - -class ResponseFormat(BaseModel): - max_temp: str - min_temp: str - - -agent = ChatAgent( - "You are a helpful assistant.", - model=model, - tools=[WeatherToolkit().get_weather_data], -) - -resp = agent.step( - "What's the temperature in New York today?", - response_format=ResponseFormat, -) - -print(resp.msg.parsed) diff --git a/examples/minimum_agents/gemini_agent.py b/examples/minimum_agents/gemini_agent.py deleted file mode 100644 index 59b9d25ec7..0000000000 --- a/examples/minimum_agents/gemini_agent.py +++ /dev/null @@ -1,44 +0,0 @@ -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= - -from pydantic import BaseModel - -from camel.agents import ChatAgent -from camel.models import ModelFactory -from camel.toolkits import WeatherToolkit -from camel.types import ModelPlatformType, ModelType - -model = ModelFactory.create( - model_platform=ModelPlatformType.GEMINI, - model_type=ModelType.GEMINI_1_5_FLASH, -) - - -class ResponseFormat(BaseModel): - max_temp: str - min_temp: str - - -agent = ChatAgent( - "You are a helpful assistant.", - model=model, - tools=[WeatherToolkit().get_weather_data], -) - -resp = agent.step( - "What's the temperature in New York today?", - response_format=ResponseFormat, -) - -print(resp.msg.parsed) diff --git a/examples/minimum_agents/mistral_agent.py b/examples/minimum_agents/mistral_agent.py deleted file mode 100644 index 911a6ae67c..0000000000 --- a/examples/minimum_agents/mistral_agent.py +++ /dev/null @@ -1,44 +0,0 @@ -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= - -from pydantic import BaseModel - -from camel.agents import ChatAgent -from camel.models import ModelFactory, ModelPlatformType -from camel.toolkits import WeatherToolkit -from camel.types import ModelType - -model = ModelFactory.create( - model_platform=ModelPlatformType.MISTRAL, - model_type=ModelType.MISTRAL_8B, -) - - -class ResponseFormat(BaseModel): - max_temp: str - min_temp: str - - -agent = ChatAgent( - "You are a helpful assistant.", - model=model, - tools=[WeatherToolkit().get_weather_data], -) - -resp = agent.step( - "What's the temperature in New York today?", - response_format=ResponseFormat, -) - -print(resp.msg.parsed) diff --git a/examples/minimum_agents/openai_agent.py b/examples/minimum_agents/openai_agent.py deleted file mode 100644 index 7acc92993b..0000000000 --- a/examples/minimum_agents/openai_agent.py +++ /dev/null @@ -1,36 +0,0 @@ -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= - -from pydantic import BaseModel - -from camel.agents import ChatAgent -from camel.toolkits import WeatherToolkit - - -class ResponseFormat(BaseModel): - max_temp: str - min_temp: str - - -agent = ChatAgent( - "You are a helpful assistant.", - tools=[WeatherToolkit().get_weather_data], -) - -resp = agent.step( - "What's the temperature in New York today?", - response_format=ResponseFormat, -) - -print(resp.msg.parsed) diff --git a/examples/minimum_agents/qwen_agent.py b/examples/minimum_agents/qwen_agent.py deleted file mode 100644 index 81c1f0627c..0000000000 --- a/examples/minimum_agents/qwen_agent.py +++ /dev/null @@ -1,44 +0,0 @@ -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= - -from pydantic import BaseModel - -from camel.agents import ChatAgent -from camel.models import ModelFactory -from camel.toolkits import WeatherToolkit -from camel.types import ModelPlatformType, ModelType - -model = ModelFactory.create( - model_platform=ModelPlatformType.QWEN, - model_type=ModelType.QWEN_TURBO, -) - - -class ResponseFormat(BaseModel): - max_temp: str - min_temp: str - - -agent = ChatAgent( - "You are a helpful assistant.", - model=model, - tools=[WeatherToolkit().get_weather_data], -) - -resp = agent.step( - "What's the temperature in New York today?", - response_format=ResponseFormat, -) - -print(resp.msg.parsed) diff --git a/test/agents/test_chat_agent.py b/test/agents/test_chat_agent.py index 3845740e1f..870a2ba142 100644 --- a/test/agents/test_chat_agent.py +++ b/test/agents/test_chat_agent.py @@ -335,7 +335,7 @@ def test_chat_agent_step_with_external_tools(step_call_count=3): ), ) - model._run = MagicMock( + model.run = MagicMock( side_effect=[model_backend_external1, model_backend_external2] * step_call_count ) @@ -361,9 +361,11 @@ def test_chat_agent_step_with_external_tools(step_call_count=3): response = external_tool_agent.step(usr_msg) assert not response.msg.content - external_tool_request = response.info["external_tool_request"] + external_tool_call_request = response.info[ + "external_tool_call_request" + ] assert ( - external_tool_request.function.name == "sub" + external_tool_call_request.tool_name == "sub" ), f"Error in calling round {i+1}" @@ -449,20 +451,6 @@ def test_chat_agent_multiple_return_messages(n, step_call_count=3): content="What do you call fake spaghetti? An impasta!", role="assistant", function_call=None, - tool_calls=[ - ChatCompletionMessageToolCall( - id="call_mock123456", - function=Function( - arguments='{ \ - "joke":"What do you call fake spaghetti?" 
\ - " An impasta!", \ - "funny_level":"6" \ - }', - name="return_json_response", - ), - type="function", - ) - ], ), ) ] @@ -476,7 +464,7 @@ def test_chat_agent_multiple_return_messages(n, step_call_count=3): total_tokens=47, ), ) - model._run = MagicMock(return_value=model_backend_rsp_tool) + model.run = MagicMock(return_value=model_backend_rsp_tool) system_msg = BaseMessage( "Assistant", @@ -542,7 +530,7 @@ def test_chat_agent_multiple_return_message_error(n, step_call_count=3): ), ) ) - model._run = MagicMock(return_value=model_backend_multi_messages) + model.run = MagicMock(return_value=model_backend_multi_messages) system_msg = BaseMessage( "Assistant", @@ -594,7 +582,7 @@ def test_chat_agent_stream_output(step_call_count=3): model_type=ModelType.GPT_4O_MINI, model_config_dict=stream_model_config.as_dict(), ) - model._run = MagicMock(return_value=model_backend_rsp_base) + model.run = MagicMock(return_value=model_backend_rsp_base) stream_assistant = ChatAgent(system_msg, model=model) stream_assistant.reset() for i in range(step_call_count): @@ -630,7 +618,7 @@ def test_set_output_language(): # Set the output language to "Arabic" output_language = "Arabic" - agent.set_output_language(output_language) + agent.output_language = output_language # Check if the output language is set correctly assert agent.output_language == output_language @@ -658,12 +646,12 @@ def test_set_multiple_output_language(): # Verify that the length of the system message is kept constant even when # multiple set_output_language operations are called - agent_with_sys_msg.set_output_language("Chinese") - agent_with_sys_msg.set_output_language("English") - agent_with_sys_msg.set_output_language("French") - agent_without_sys_msg.set_output_language("Chinese") - agent_without_sys_msg.set_output_language("English") - agent_without_sys_msg.set_output_language("French") + agent_with_sys_msg.output_language = "Chinese" + agent_with_sys_msg.output_language = "English" + agent_with_sys_msg.output_language = "French" + agent_without_sys_msg.output_language = "Chinese" + agent_without_sys_msg.output_language = "English" + agent_without_sys_msg.output_language = "French" updated_system_message_with_sys_msg = { 'role': 'system', @@ -689,29 +677,6 @@ def test_set_multiple_output_language(): ) -@pytest.mark.model_backend -def test_function_enabled(): - system_message = BaseMessage( - role_name="assistant", - role_type=RoleType.ASSISTANT, - meta_dict=None, - content="You are a help assistant.", - ) - model = ModelFactory.create( - model_platform=ModelPlatformType.OPENAI, - model_type=ModelType.GPT_4O_MINI, - ) - agent_no_func = ChatAgent(system_message=system_message) - agent_with_funcs = ChatAgent( - system_message=system_message, - model=model, - tools=MathToolkit().get_tools(), - ) - - assert not agent_no_func.is_tools_added() - assert agent_with_funcs.is_tools_added() - - @pytest.mark.model_backend def test_tool_calling_sync(step_call_count=3): system_message = BaseMessage( @@ -845,7 +810,7 @@ def test_tool_calling_sync(step_call_count=3): ), ) - model._run = MagicMock( + model.run = MagicMock( side_effect=[ model_backend_rsp_tool, model_backend_rsp_tool1, @@ -971,7 +936,7 @@ async def test_tool_calling_math_async(step_call_count=3): ), ) - model._run = MagicMock( + model.run = MagicMock( side_effect=[ model_backend_rsp_tool, model_backend_rsp_tool1, @@ -1026,7 +991,7 @@ async def async_sleep(second: int) -> int: # Mock tool calling def mock_run_tool_calling_async(*args, **kwargs): # Reset tool_calls at the beginning of each 
new round of step() call - if model._run.call_count % 2 == 1: + if model.run.call_count % 2 == 1: model_backend_rsp_tool_async.choices[0].message.tool_calls = [ ChatCompletionMessageToolCall( id='call_mock_123456', @@ -1049,7 +1014,7 @@ def mock_run_tool_calling_async(*args, **kwargs): return model_backend_rsp_tool_async - model._run = MagicMock(side_effect=mock_run_tool_calling_async) + model.run = MagicMock(side_effect=mock_run_tool_calling_async) agent = ChatAgent( system_message=system_message, diff --git a/test/agents/test_role_playing.py b/test/agents/test_role_playing.py index a5c66aeb8f..5a12808b91 100644 --- a/test/agents/test_role_playing.py +++ b/test/agents/test_role_playing.py @@ -148,7 +148,7 @@ def test_role_playing_step( step_call_count=3, ): if model is not None: - model._run = MagicMock(return_value=model_backend_rsp) + model.run = MagicMock(return_value=model_backend_rsp) role_playing = RolePlaying( assistant_role_name="AI Assistant", @@ -196,7 +196,7 @@ def test_role_playing_step( @pytest.mark.model_backend def test_role_playing_with_function(step_call_count=3): if model is not None: - model._run = MagicMock(return_value=model_backend_rsp) + model.run = MagicMock(return_value=model_backend_rsp) tools = MathToolkit().get_tools() diff --git a/test/models/test_model_factory.py b/test/models/test_model_factory.py index 3e06732ed2..58f4a6fed9 100644 --- a/test/models/test_model_factory.py +++ b/test/models/test_model_factory.py @@ -138,7 +138,7 @@ def test_model_factory(model_platform, model_type): "content": "Hello", }, ] - response = model_inst._run(messages).model_dump() + response = model_inst.run(messages).model_dump() assert isinstance(response, dict) assert 'id' in response assert isinstance(response['id'], str) diff --git a/test/models/test_model_manager.py b/test/models/test_model_manager.py index 6b36684540..0027e4d4d1 100644 --- a/test/models/test_model_manager.py +++ b/test/models/test_model_manager.py @@ -68,8 +68,8 @@ def test_model_manager( assert model_manager.scheduling_strategy.__name__ == "round_robin" for model in model_manager.models: if TYPE_CHECKING: - assert isinstance(model._run, Mock) - assert model._run.call_count == times_each_model_called + assert isinstance(model.run, Mock) + assert model.run.call_count == times_each_model_called if strategy == "always_first": assert model_manager.scheduling_strategy.__name__ == "always_first" assert models[0].run.call_count == times_each_model_called @@ -79,8 +79,8 @@ def test_model_manager( total_calls = 0 for model in model_manager.models: if TYPE_CHECKING: - assert isinstance(model._run, Mock) - total_calls += model._run.call_count + assert isinstance(model.run, Mock) + total_calls += model.run.call_count assert total_calls == times_each_model_called From 1ab516aac6bd9e597b229f12838d0267164aa343 Mon Sep 17 00:00:00 2001 From: Isaac Jin Date: Sun, 16 Feb 2025 17:18:19 -0600 Subject: [PATCH 27/28] update models --- camel/models/deepseek_model.py | 97 ++++++++++++++-------------------- 1 file changed, 40 insertions(+), 57 deletions(-) diff --git a/camel/models/deepseek_model.py b/camel/models/deepseek_model.py index 7591279f20..c1949cb459 100644 --- a/camel/models/deepseek_model.py +++ b/camel/models/deepseek_model.py @@ -128,24 +128,6 @@ def _prepare_request( if self.model_type in [ ModelType.DEEPSEEK_REASONER, ]: - import re - - # Remove thinking content from messages before sending to API - # This ensures only the final response is sent, excluding - # intermediate thought processes - messages = [ - { # type: 
ignore[misc]
-                    **msg,
-                    'content': re.sub(
-                        r'<think>.*?</think>',
-                        '',
-                        msg['content'],  # type: ignore[arg-type]
-                        flags=re.DOTALL,
-                    ).strip(),
-                }
-                for msg in messages
-            ]
-
             logger.warning(
                 "Warning: You are using an DeepSeek Reasoner model, "
                 "which has certain limitations, reference: "
@@ -169,6 +151,44 @@ def _prepare_request(
 
         return request_config
 
+    def _post_handle_response(
+        self, response: ChatCompletion
+    ) -> ChatCompletion:
+        """Handle reasoning content with <think> tags at the beginning."""
+        if (
+            self.model_type in [ModelType.DEEPSEEK_REASONER]
+            and os.environ.get("GET_REASONING_CONTENT", "false").lower()
+            == "true"
+        ):
+            reasoning_content = response.choices[0].message.reasoning_content
+            combined_content = (
+                f"<think>\n{reasoning_content}\n</think>\n"
+                if reasoning_content
+                else ""
+            ) + response.choices[0].message.content
+
+            response = ChatCompletion.construct(
+                id=response.id,
+                choices=[
+                    dict(
+                        index=response.choices[0].index,
+                        message={
+                            "role": response.choices[0].message.role,
+                            "content": combined_content,
+                            "tool_calls": None,
+                        },
+                        finish_reason=response.choices[0].finish_reason
+                        if response.choices[0].finish_reason
+                        else None,
+                    )
+                ],
+                created=response.created,
+                model=response.model,
+                object="chat.completion",
+                usage=response.usage,
+            )
+        return response
+
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -196,7 +216,7 @@ def _run(
             **request_config,
         )
 
-        return response
+        return self._post_handle_response(response)
 
     async def _arun(
         self,
@@ -224,44 +244,7 @@ async def _arun(
             **request_config,
         )
 
-        # Handle reasoning content with <think> tags at the beginning
-        if (
-            self.model_type
-            in [
-                ModelType.DEEPSEEK_REASONER,
-            ]
-            and os.environ.get("GET_REASONING_CONTENT", "false").lower()
-            == "true"
-        ):
-            reasoning_content = response.choices[0].message.reasoning_content
-            combined_content = (
-                f"<think>\n{reasoning_content}\n</think>\n"
-                if reasoning_content
-                else ""
-            ) + response.choices[0].message.content
-
-            response = ChatCompletion.construct(
-                id=response.id,
-                choices=[
-                    dict(
-                        index=response.choices[0].index,
-                        message={
-                            "role": response.choices[0].message.role,
-                            "content": combined_content,
-                            "tool_calls": None,
-                        },
-                        finish_reason=response.choices[0].finish_reason
-                        if response.choices[0].finish_reason
-                        else None,
-                    )
-                ],
-                created=response.created,
-                model=response.model,
-                object="chat.completion",
-                usage=response.usage,
-            )
-
-        return response
+        return self._post_handle_response(response)
 
     def check_model_config(self):
         r"""Check whether the model configuration contains any
From 08935d00d7358e2966c9fd9b26bf28bee05f2263 Mon Sep 17 00:00:00 2001
From: Wendong 
Date: Mon, 17 Feb 2025 19:18:42 +0800
Subject: [PATCH 28/28] unit test and mypy fix

---
 camel/models/deepseek_model.py |  6 +++---
 test/models/test_base_model.py | 12 ++++++++++++
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/camel/models/deepseek_model.py b/camel/models/deepseek_model.py
index c1949cb459..b8c9e100a9 100644
--- a/camel/models/deepseek_model.py
+++ b/camel/models/deepseek_model.py
@@ -154,14 +154,14 @@ def _prepare_request(
     def _post_handle_response(
         self, response: ChatCompletion
     ) -> ChatCompletion:
-        """Handle reasoning content with <think> tags at the beginning."""
+        r"""Handle reasoning content with <think> tags at the beginning."""
         if (
             self.model_type in [ModelType.DEEPSEEK_REASONER]
             and os.environ.get("GET_REASONING_CONTENT", "false").lower()
             == "true"
         ):
-            reasoning_content = response.choices[0].message.reasoning_content
-            combined_content = (
+            reasoning_content = 
response.choices[0].message.reasoning_content  # type: ignore[attr-defined]
+            combined_content = (  # type: ignore[operator]
                 f"<think>\n{reasoning_content}\n</think>\n"
                 if reasoning_content
                 else ""
diff --git a/test/models/test_base_model.py b/test/models/test_base_model.py
index f84b7b1e1e..06e6688d3d 100644
--- a/test/models/test_base_model.py
+++ b/test/models/test_base_model.py
@@ -34,6 +34,12 @@ def run(self, messages):
     def check_model_config(self):
         pass
 
+    def _run(self, messages, response_format=None, tools=None):
+        pass
+
+    async def _arun(self, messages, response_format=None, tools=None):
+        pass
+
     model = DummyModel(ModelType.GPT_4O_MINI)
 
     # Test basic thinking removal
@@ -78,6 +84,12 @@ def run(self, messages):
     def check_model_config(self):
         pass
 
+    def _run(self, messages, response_format=None, tools=None):
+        pass
+
+    async def _arun(self, messages, response_format=None, tools=None):
+        pass
+
     model = TestModel(ModelType.GPT_4O_MINI)
     messages = [
         {'role': 'user', 'content': 'Hello hi world'}