diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 49c8ea650f..72743bb8a1 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -86,18 +86,18 @@ from camel.utils import track_agent -class FunctionCallingRecord(BaseModel): - r"""Historical records of functions called in the conversation. +class ToolCallingRecord(BaseModel): + r"""Historical records of tools called in the conversation. Attributes: - func_name (str): The name of the function being called. + tool_name (str): The name of the tool being called. args (Dict[str, Any]): The dictionary of arguments passed to - the function. - result (Any): The execution result of calling this function. + the tool. + result (Any): The execution result of calling this tool. tool_call_id (str): The ID of the tool call, if available. """ - func_name: str + tool_name: str args: Dict[str, Any] result: Any tool_call_id: str @@ -106,10 +106,10 @@ def __str__(self) -> str: r"""Overridden version of the string function. Returns: - str: Modified string to represent the function calling. + str: Modified string to represent the tool calling. """ return ( - f"Function Execution: {self.func_name}\n" + f"Tool Execution: {self.tool_name}\n" f"\tArgs: {self.args}\n" f"\tResult: {self.result}\n" ) @@ -489,7 +489,7 @@ def get_info( usage: Optional[Dict[str, int]], termination_reasons: List[str], num_tokens: int, - tool_calls: List[FunctionCallingRecord], + tool_calls: List[ToolCallingRecord], external_tool_request: Optional[ChatCompletionMessageToolCall] = None, ) -> Dict[str, Any]: r"""Returns a dictionary containing information about the chat session. @@ -501,7 +501,7 @@ def get_info( termination_reasons (List[str]): The reasons for the termination of the chat session. num_tokens (int): The number of tokens used in the chat session. 
- tool_calls (List[FunctionCallingRecord]): The list of function + tool_calls (List[ToolCallingRecord]): The list of tool calling records, containing the information of called tools. external_tool_request (Optional[ChatCompletionMessageToolCall], optional): @@ -645,7 +645,7 @@ def _handle_step( ) # Record function calls made during the session - tool_call_records: List[FunctionCallingRecord] = [] + tool_call_records: List[ToolCallingRecord] = [] external_tool_request = None @@ -885,7 +885,7 @@ async def step_async( self.update_memory(input_message, OpenAIBackendRole.USER) - tool_call_records: List[FunctionCallingRecord] = [] + tool_call_records: List[ToolCallingRecord] = [] while True: try: openai_messages, num_tokens = self.memory.get_context() @@ -970,7 +970,7 @@ def _step_tool_call_and_update( self, response: ChatCompletion - ) -> FunctionCallingRecord: + ) -> ToolCallingRecord: r"""Processes a function call within the chat completion response, records the function call in the provided list of tool calls and updates the memory of the current agent. @@ -980,7 +980,7 @@ def _step_tool_call_and_update( completion. Returns: - FunctionCallingRecord: The record of calling the function. + ToolCallingRecord: The record of calling the tool. 
""" # Perform function calling @@ -996,7 +996,7 @@ def _step_tool_call_and_update( async def _step_tool_call_and_update_async( self, response: ChatCompletion - ) -> FunctionCallingRecord: + ) -> ToolCallingRecord: ( func_assistant_msg, func_result_msg, @@ -1015,7 +1015,7 @@ def _structure_output_with_function( List[str], Dict[str, int], str, - FunctionCallingRecord, + ToolCallingRecord, int, ]: r"""Internal function of structuring the output of the agent based on @@ -1027,7 +1027,7 @@ Returns: Tuple[List[BaseMessage], List[str], Dict[str, int], str, - FunctionCallingRecord, int]: + ToolCallingRecord, int]: A tuple containing the output messages, finish reasons, usage dictionary, response ID, function calling record, and number of tokens. @@ -1141,7 +1141,7 @@ def _step_get_info( finish_reasons: List[str], usage_dict: Dict[str, int], response_id: str, - tool_calls: List[FunctionCallingRecord], + tool_calls: List[ToolCallingRecord], num_tokens: int, external_tool_request: Optional[ChatCompletionMessageToolCall] = None, ) -> Dict[str, Any]: @@ -1160,7 +1160,7 @@ def _step_get_info( usage_dict (Dict[str, int]): Dictionary containing token usage information. response_id (str): The ID of the response from the model. - tool_calls (List[FunctionCallingRecord]): Records of function calls + tool_calls (List[ToolCallingRecord]): Records of tool calls made during this step. num_tokens (int): The number of tokens used in this step. external_tool_request (Optional[ChatCompletionMessageToolCall]): @@ -1335,7 +1335,7 @@ def handle_stream_response( def _step_token_exceed( self, num_tokens: int, - tool_calls: List[FunctionCallingRecord], + tool_calls: List[ToolCallingRecord], termination_reason: str, ) -> ChatAgentResponse: r"""Return trivial response containing number of tokens and information Args: num_tokens (int): Number of tokens in the messages. 
- tool_calls (List[FunctionCallingRecord]): List of information + tool_calls (List[ToolCallingRecord]): List of information objects of functions called in the current step. termination_reason (str): String of termination reason. @@ -1372,7 +1372,7 @@ def _step_tool_call( self, response: ChatCompletion, ) -> Tuple[ - FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord + FunctionCallingMessage, FunctionCallingMessage, ToolCallingRecord ]: r"""Execute the function with arguments following the model's response. @@ -1418,8 +1418,8 @@ def _step_tool_call( ) # Record information about this function call - func_record = FunctionCallingRecord( - func_name=func_name, + func_record = ToolCallingRecord( + tool_name=func_name, args=args, result=result, tool_call_id=tool_call_id, @@ -1442,7 +1442,7 @@ async def step_tool_call_async( self, response: ChatCompletion, ) -> Tuple[ - FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord + FunctionCallingMessage, FunctionCallingMessage, ToolCallingRecord ]: r"""Execute the async function with arguments following the model's response. 
@@ -1488,8 +1488,8 @@ async def step_tool_call_async( ) # Record information about this function call - func_record = FunctionCallingRecord( - func_name=func_name, + func_record = ToolCallingRecord( + tool_name=func_name, args=args, result=result, tool_call_id=tool_call_id, diff --git a/examples/models/openai_o3_mini_example.py b/examples/models/openai_o3_mini_example.py index d2d89f77ee..3ee7bbf950 100644 --- a/examples/models/openai_o3_mini_example.py +++ b/examples/models/openai_o3_mini_example.py @@ -40,7 +40,7 @@ print(str(response.info['tool_calls'])[:1000]) ''' =============================================================================== -[FunctionCallingRecord(func_name='search_duckduckgo', args={'query': 'what is +[ToolCallingRecord(tool_name='search_duckduckgo', args={'query': 'what is deepseek r1, and do a comparison between deepseek r1 and openai o3 mini', 'source': 'text', 'max_results': 5}, result=[{'result_id': 1, 'title': 'DeepSeek R1 vs OpenAI o1: Which One is Better? 
- Analytics Vidhya', diff --git a/examples/models/role_playing_with_cohere.py b/examples/models/role_playing_with_cohere.py index ff93313d00..89770e94f3 100644 --- a/examples/models/role_playing_with_cohere.py +++ b/examples/models/role_playing_with_cohere.py @@ -15,7 +15,7 @@ from colorama import Fore -from camel.agents.chat_agent import FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.configs import CohereConfig from camel.models import ModelFactory from camel.societies import RolePlaying @@ -120,8 +120,8 @@ def main( # Print output from the assistant, including any function # execution information print_text_animated(Fore.GREEN + "AI Assistant:") - tool_calls: List[FunctionCallingRecord] = [ - FunctionCallingRecord(**call.as_dict()) + tool_calls: List[ToolCallingRecord] = [ + ToolCallingRecord(**call.as_dict()) for call in assistant_response.info['tool_calls'] ] for func_record in tool_calls: diff --git a/examples/models/role_playing_with_mistral.py b/examples/models/role_playing_with_mistral.py index e92ade930c..5ef2217d67 100644 --- a/examples/models/role_playing_with_mistral.py +++ b/examples/models/role_playing_with_mistral.py @@ -16,7 +16,7 @@ from colorama import Fore -from camel.agents.chat_agent import FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.configs import MistralConfig from camel.models import ModelFactory from camel.societies import RolePlaying @@ -120,8 +120,8 @@ def main( # Print output from the assistant, including any function # execution information print_text_animated(Fore.GREEN + "AI Assistant:") - tool_calls: List[FunctionCallingRecord] = [ - FunctionCallingRecord(**call.as_dict()) + tool_calls: List[ToolCallingRecord] = [ + ToolCallingRecord(**call.as_dict()) for call in assistant_response.info['tool_calls'] ] for func_record in tool_calls: diff --git a/examples/models/role_playing_with_ollama.py b/examples/models/role_playing_with_ollama.py index 
77eaeec33e..fe58c5a44b 100644 --- a/examples/models/role_playing_with_ollama.py +++ b/examples/models/role_playing_with_ollama.py @@ -16,7 +16,7 @@ from colorama import Fore -from camel.agents.chat_agent import FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.models import ModelFactory from camel.societies import RolePlaying from camel.types import ModelPlatformType @@ -100,8 +100,8 @@ def main( # Print output from the assistant, including any function # execution information print_text_animated(Fore.GREEN + "AI Assistant:") - tool_calls: List[FunctionCallingRecord] = [ - FunctionCallingRecord(**call.as_dict()) + tool_calls: List[ToolCallingRecord] = [ + ToolCallingRecord(**call.as_dict()) for call in assistant_response.info['tool_calls'] ] for func_record in tool_calls: diff --git a/examples/models/role_playing_with_sambanova.py b/examples/models/role_playing_with_sambanova.py index 2cbba7342d..9b8834fe24 100644 --- a/examples/models/role_playing_with_sambanova.py +++ b/examples/models/role_playing_with_sambanova.py @@ -17,7 +17,7 @@ import agentops from colorama import Fore -from camel.agents.chat_agent import FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.configs import SambaCloudAPIConfig from camel.models import ModelFactory from camel.societies import RolePlaying @@ -128,8 +128,8 @@ def main( # Print output from the assistant, including any function # execution information print_text_animated(Fore.GREEN + "AI Assistant:") - tool_calls: List[FunctionCallingRecord] = [ - FunctionCallingRecord(**call.as_dict()) + tool_calls: List[ToolCallingRecord] = [ + ToolCallingRecord(**call.as_dict()) for call in assistant_response.info['tool_calls'] ] for func_record in tool_calls: diff --git a/examples/observability/agentops_track_roleplaying_with_function.py b/examples/observability/agentops_track_roleplaying_with_function.py index 800ddfcf3f..4570ae4f6e 100644 --- 
a/examples/observability/agentops_track_roleplaying_with_function.py +++ b/examples/observability/agentops_track_roleplaying_with_function.py @@ -17,7 +17,7 @@ import agentops from colorama import Fore -from camel.agents.chat_agent import FunctionCallingRecord +from camel.agents.chat_agent import ToolCallingRecord from camel.configs import ChatGPTConfig from camel.models import ModelFactory from camel.societies import RolePlaying @@ -125,8 +125,8 @@ # Print output from the assistant, including any function # execution information print_text_animated(Fore.GREEN + "AI Assistant:") - tool_calls: List[FunctionCallingRecord] = [ - FunctionCallingRecord(**call.as_dict()) + tool_calls: List[ToolCallingRecord] = [ + ToolCallingRecord(**call.as_dict()) for call in assistant_response.info['tool_calls'] ] for func_record in tool_calls: diff --git a/examples/toolkits/arxiv_toolkit.py b/examples/toolkits/arxiv_toolkit.py index ddf3a89ddd..57e2ba99f7 100644 --- a/examples/toolkits/arxiv_toolkit.py +++ b/examples/toolkits/arxiv_toolkit.py @@ -55,7 +55,7 @@ print(str(response.info['tool_calls'])[:1000]) ''' =============================================================================== -[FunctionCallingRecord(func_name='search_papers', args={'query': 'attention is +[ToolCallingRecord(tool_name='search_papers', args={'query': 'attention is all you need'}, result=[{'title': "Attention Is All You Need But You Don't Need All Of It For Inference of Large Language Models", 'published_date': '2024-07-22', 'authors': ['Georgy Tyukin', 'Gbetondji J-S Dovonon', 'Jean @@ -85,7 +85,7 @@ print(str(response.info['tool_calls'])[:1000]) ''' =============================================================================== -[FunctionCallingRecord(func_name='download_papers', args={'query': 'attention +[ToolCallingRecord(tool_name='download_papers', args={'query': 'attention is all you need', 'output_dir': '/Users/enrei/Desktop/camel0826/camel/examples/ tool_call', 'paper_ids': ['2407.15516v1', 
'2107.08000v1', '2306.01926v1', '2112.05993v1', '1912.11959v2']}, result='papers downloaded successfully')] diff --git a/examples/toolkits/google_scholar_toolkit.py b/examples/toolkits/google_scholar_toolkit.py index b00a50353b..75137d6fcd 100644 --- a/examples/toolkits/google_scholar_toolkit.py +++ b/examples/toolkits/google_scholar_toolkit.py @@ -58,7 +58,7 @@ print(str(response.info['tool_calls'])[:1000]) """ =============================================================================== -[FunctionCallingRecord(func_name='get_author_detailed_info', args={}, result= +[ToolCallingRecord(tool_name='get_author_detailed_info', args={}, result= {'container_type': 'Author', 'filled': ['basics', 'indices', 'counts', 'coauthors', 'publications', 'public_access'], 'scholar_id': 'JicYPdAAAAAJ', 'source': , 'name': @@ -98,7 +98,7 @@ print(str(response.info['tool_calls'])[:1000]) """ =============================================================================== -[FunctionCallingRecord(func_name='get_author_publications', args={}, result= +[ToolCallingRecord(tool_name='get_author_publications', args={}, result= ['Imagenet classification with deep convolutional neural networks', 'Deep learning', 'Learning internal representations by error-propagation', 'Dropout: a simple way to prevent neural networks from overfitting', 'Visualizing data @@ -127,7 +127,7 @@ print(response.info['tool_calls']) """ =============================================================================== -[FunctionCallingRecord(func_name='get_publication_by_title', args= +[ToolCallingRecord(tool_name='get_publication_by_title', args= {'publication_title': 'Camel: Communicative agents for" mind" exploration of large language model society'}, result={'container_type': 'Publication', 'source': 0, f"Error in calling round {i+1}" assert str(tool_calls[0]).startswith( - "Function Execution" + "Tool Execution" ), f"Error in calling round {i+1}" assert ( - tool_calls[0].func_name == "multiply" + 
tool_calls[0].tool_name == "multiply" ), f"Error in calling round {i+1}" assert tool_calls[0].args == { "a": 2, @@ -985,7 +985,7 @@ async def test_tool_calling_math_async(step_call_count=3): tool_calls = agent_response.info['tool_calls'] assert ( - tool_calls[0].func_name == "multiply" + tool_calls[0].tool_name == "multiply" ), f"Error in calling round {i+1}" assert tool_calls[0].args == { "a": 2, @@ -1074,11 +1074,11 @@ def mock_run_tool_calling_async(*args, **kwargs): assert tool_calls, f"Error in calling round {i+1}" assert str(tool_calls[0]).startswith( - "Function Execution" + "Tool Execution" ), f"Error in calling round {i+1}" assert ( - tool_calls[0].func_name == "async_sleep" + tool_calls[0].tool_name == "async_sleep" ), f"Error in calling round {i+1}" assert tool_calls[0].args == { 'second': 1