chore: Rename tool call instances (#1492)
Co-authored-by: Wendong-Fan <133094783+Wendong-Fan@users.noreply.github.com>
Co-authored-by: Wendong <w3ndong.fan@gmail.com>
3 people authored and apokryphosx committed Feb 11, 2025
1 parent ee9b32b commit 00f2db3
Showing 13 changed files with 63 additions and 63 deletions.
54 changes: 27 additions & 27 deletions camel/agents/chat_agent.py
@@ -86,18 +86,18 @@
from camel.utils import track_agent


class FunctionCallingRecord(BaseModel):
r"""Historical records of functions called in the conversation.
class ToolCallingRecord(BaseModel):
r"""Historical records of tools called in the conversation.
Attributes:
func_name (str): The name of the function being called.
tool_name (str): The name of the tool being called.
args (Dict[str, Any]): The dictionary of arguments passed to
the function.
result (Any): The execution result of calling this function.
the tools.
result (Any): The execution result of calling this tool.
tool_call_id (str): The ID of the tool call, if available.
"""

func_name: str
tool_name: str
args: Dict[str, Any]
result: Any
tool_call_id: str
@@ -106,10 +106,10 @@ def __str__(self) -> str:
r"""Overridden version of the string function.
Returns:
str: Modified string to represent the function calling.
str: Modified string to represent the tool calling.
"""
return (
f"Function Execution: {self.func_name}\n"
f"Tool Execution: {self.tool_name}\n"
f"\tArgs: {self.args}\n"
f"\tResult: {self.result}\n"
)
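A minimal usage sketch of the renamed model (illustrative only, not part of this diff; the field names follow the pydantic definition above, and the import path matches the example scripts updated below):

from camel.agents.chat_agent import ToolCallingRecord

# Hypothetical values, purely for illustration.
record = ToolCallingRecord(
    tool_name="search_duckduckgo",
    args={"query": "camel-ai"},
    result=[{"result_id": 1, "title": "CAMEL-AI"}],
    tool_call_id="call_0",
)

print(record)
# Tool Execution: search_duckduckgo
#     Args: {'query': 'camel-ai'}
#     Result: [{'result_id': 1, 'title': 'CAMEL-AI'}]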
@@ -489,7 +489,7 @@ def get_info(
usage: Optional[Dict[str, int]],
termination_reasons: List[str],
num_tokens: int,
tool_calls: List[FunctionCallingRecord],
tool_calls: List[ToolCallingRecord],
external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
) -> Dict[str, Any]:
r"""Returns a dictionary containing information about the chat session.
@@ -501,7 +501,7 @@ def get_info(
termination_reasons (List[str]): The reasons for the termination
of the chat session.
num_tokens (int): The number of tokens used in the chat session.
tool_calls (List[FunctionCallingRecord]): The list of function
tool_calls (List[ToolCallingRecord]): The list of function
calling records, containing the information of called tools.
external_tool_request
(Optional[ChatCompletionMessageToolCall], optional):
@@ -645,7 +645,7 @@ def _handle_step(
)

# Record function calls made during the session
tool_call_records: List[FunctionCallingRecord] = []
tool_call_records: List[ToolCallingRecord] = []

external_tool_request = None

@@ -885,7 +885,7 @@ async def step_async(

self.update_memory(input_message, OpenAIBackendRole.USER)

tool_call_records: List[FunctionCallingRecord] = []
tool_call_records: List[ToolCallingRecord] = []
while True:
try:
openai_messages, num_tokens = self.memory.get_context()
@@ -970,7 +970,7 @@ async def step_async(

def _step_tool_call_and_update(
self, response: ChatCompletion
) -> FunctionCallingRecord:
) -> ToolCallingRecord:
r"""Processes a function call within the chat completion response,
records the function call in the provided list of tool calls and
updates the memory of the current agent.
@@ -980,7 +980,7 @@ def _step_tool_call_and_update(
completion.
Returns:
FunctionCallingRecord: The record of calling the function.
ToolCallingRecord: The record of calling the function.
"""

# Perform function calling
@@ -996,7 +996,7 @@

async def _step_tool_call_and_update_async(
self, response: ChatCompletion
) -> FunctionCallingRecord:
) -> ToolCallingRecord:
(
func_assistant_msg,
func_result_msg,
@@ -1015,7 +1015,7 @@ def _structure_output_with_function(
List[str],
Dict[str, int],
str,
FunctionCallingRecord,
ToolCallingRecord,
int,
]:
r"""Internal function of structuring the output of the agent based on
@@ -1027,7 +1027,7 @@ def _structure_output_with_function(
Returns:
Tuple[List[BaseMessage], List[str], Dict[str, int], str,
FunctionCallingRecord, int]:
ToolCallingRecord, int]:
A tuple containing the output messages, finish reasons, usage
dictionary, response ID, function calling record, and number of
tokens.
@@ -1141,7 +1141,7 @@ def _step_get_info(
finish_reasons: List[str],
usage_dict: Dict[str, int],
response_id: str,
tool_calls: List[FunctionCallingRecord],
tool_calls: List[ToolCallingRecord],
num_tokens: int,
external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
) -> Dict[str, Any]:
@@ -1160,7 +1160,7 @@ def _step_get_info(
usage_dict (Dict[str, int]): Dictionary containing token usage
information.
response_id (str): The ID of the response from the model.
tool_calls (List[FunctionCallingRecord]): Records of function calls
tool_calls (List[ToolCallingRecord]): Records of function calls
made during this step.
num_tokens (int): The number of tokens used in this step.
external_tool_request (Optional[ChatCompletionMessageToolCall]):
@@ -1335,15 +1335,15 @@ def handle_stream_response(
def _step_token_exceed(
self,
num_tokens: int,
tool_calls: List[FunctionCallingRecord],
tool_calls: List[ToolCallingRecord],
termination_reason: str,
) -> ChatAgentResponse:
r"""Return trivial response containing number of tokens and information
of called functions when the number of tokens exceeds.
Args:
num_tokens (int): Number of tokens in the messages.
tool_calls (List[FunctionCallingRecord]): List of information
tool_calls (List[ToolCallingRecord]): List of information
objects of functions called in the current step.
termination_reason (str): String of termination reason.
@@ -1372,7 +1372,7 @@ def _step_tool_call(
self,
response: ChatCompletion,
) -> Tuple[
FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord
FunctionCallingMessage, FunctionCallingMessage, ToolCallingRecord
]:
r"""Execute the function with arguments following the model's response.
@@ -1418,8 +1418,8 @@ def _step_tool_call(
)

# Record information about this function call
func_record = FunctionCallingRecord(
func_name=func_name,
func_record = ToolCallingRecord(
tool_name=func_name,
args=args,
result=result,
tool_call_id=tool_call_id,
@@ -1442,7 +1442,7 @@ async def step_tool_call_async(
self,
response: ChatCompletion,
) -> Tuple[
FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord
FunctionCallingMessage, FunctionCallingMessage, ToolCallingRecord
]:
r"""Execute the async function with arguments following the model's
response.
@@ -1488,8 +1488,8 @@ async def step_tool_call_async(
)

# Record information about this function call
func_record = FunctionCallingRecord(
func_name=func_name,
func_record = ToolCallingRecord(
tool_name=func_name,
args=args,
result=result,
tool_call_id=tool_call_id,
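For downstream code that still imports the old name, the rename maps one-to-one; a minimal migration sketch (assuming this commit ships no backward-compatibility alias):

# Before (pre-#1492):
#   from camel.agents.chat_agent import FunctionCallingRecord
#   name = record.func_name

# After the rename:
from camel.agents.chat_agent import ToolCallingRecord

def summarize(record: ToolCallingRecord) -> str:
    # `tool_name` replaces the old `func_name`; `args`, `result`, and
    # `tool_call_id` keep their previous meanings.
    return f"{record.tool_name}({record.args}) -> {record.result}"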
2 changes: 1 addition & 1 deletion examples/models/openai_o3_mini_example.py
@@ -40,7 +40,7 @@
print(str(response.info['tool_calls'])[:1000])
'''
===============================================================================
[FunctionCallingRecord(func_name='search_duckduckgo', args={'query': 'what is
[ToolCallingRecord(func_name='search_duckduckgo', args={'query': 'what is
deepseek r1, and do a comparison between deepseek r1 and openai o3 mini',
'source': 'text', 'max_results': 5}, result=[{'result_id': 1, 'title':
'DeepSeek R1 vs OpenAI o1: Which One is Better? - Analytics Vidhya',
6 changes: 3 additions & 3 deletions examples/models/role_playing_with_cohere.py
@@ -15,7 +15,7 @@

from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.agents.chat_agent import ToolCallingRecord
from camel.configs import CohereConfig
from camel.models import ModelFactory
from camel.societies import RolePlaying
@@ -120,8 +120,8 @@ def main(
# Print output from the assistant, including any function
# execution information
print_text_animated(Fore.GREEN + "AI Assistant:")
tool_calls: List[FunctionCallingRecord] = [
FunctionCallingRecord(**call.as_dict())
tool_calls: List[ToolCallingRecord] = [
ToolCallingRecord(**call.as_dict())
for call in assistant_response.info['tool_calls']
]
for func_record in tool_calls:
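The loop body is truncated in the hunk above; in these example scripts it typically just prints each record, which after the rename relies on ToolCallingRecord.__str__ (a sketch, not part of the diff):

for func_record in tool_calls:
    # Prints "Tool Execution: <tool_name>" plus the args and result,
    # as defined by ToolCallingRecord.__str__ in chat_agent.py.
    print(func_record)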
6 changes: 3 additions & 3 deletions examples/models/role_playing_with_mistral.py
@@ -16,7 +16,7 @@

from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.agents.chat_agent import ToolCallingRecord
from camel.configs import MistralConfig
from camel.models import ModelFactory
from camel.societies import RolePlaying
@@ -120,8 +120,8 @@ def main(
# Print output from the assistant, including any function
# execution information
print_text_animated(Fore.GREEN + "AI Assistant:")
tool_calls: List[FunctionCallingRecord] = [
FunctionCallingRecord(**call.as_dict())
tool_calls: List[ToolCallingRecord] = [
ToolCallingRecord(**call.as_dict())
for call in assistant_response.info['tool_calls']
]
for func_record in tool_calls:
6 changes: 3 additions & 3 deletions examples/models/role_playing_with_ollama.py
@@ -16,7 +16,7 @@

from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.agents.chat_agent import ToolCallingRecord
from camel.models import ModelFactory
from camel.societies import RolePlaying
from camel.types import ModelPlatformType
@@ -100,8 +100,8 @@ def main(
# Print output from the assistant, including any function
# execution information
print_text_animated(Fore.GREEN + "AI Assistant:")
tool_calls: List[FunctionCallingRecord] = [
FunctionCallingRecord(**call.as_dict())
tool_calls: List[ToolCallingRecord] = [
ToolCallingRecord(**call.as_dict())
for call in assistant_response.info['tool_calls']
]
for func_record in tool_calls:
6 changes: 3 additions & 3 deletions examples/models/role_playing_with_sambanova.py
@@ -17,7 +17,7 @@
import agentops
from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.agents.chat_agent import ToolCallingRecord
from camel.configs import SambaCloudAPIConfig
from camel.models import ModelFactory
from camel.societies import RolePlaying
@@ -128,8 +128,8 @@ def main(
# Print output from the assistant, including any function
# execution information
print_text_animated(Fore.GREEN + "AI Assistant:")
tool_calls: List[FunctionCallingRecord] = [
FunctionCallingRecord(**call.as_dict())
tool_calls: List[ToolCallingRecord] = [
ToolCallingRecord(**call.as_dict())
for call in assistant_response.info['tool_calls']
]
for func_record in tool_calls:
@@ -17,7 +17,7 @@
import agentops
from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.agents.chat_agent import ToolCallingRecord
from camel.configs import ChatGPTConfig
from camel.models import ModelFactory
from camel.societies import RolePlaying
@@ -125,8 +125,8 @@
# Print output from the assistant, including any function
# execution information
print_text_animated(Fore.GREEN + "AI Assistant:")
tool_calls: List[FunctionCallingRecord] = [
FunctionCallingRecord(**call.as_dict())
tool_calls: List[ToolCallingRecord] = [
ToolCallingRecord(**call.as_dict())
for call in assistant_response.info['tool_calls']
]
for func_record in tool_calls:
4 changes: 2 additions & 2 deletions examples/toolkits/arxiv_toolkit.py
@@ -55,7 +55,7 @@
print(str(response.info['tool_calls'])[:1000])
'''
===============================================================================
[FunctionCallingRecord(func_name='search_papers', args={'query': 'attention is
[ToolCallingRecord(func_name='search_papers', args={'query': 'attention is
all you need'}, result=[{'title': "Attention Is All You Need But You Don't
Need All Of It For Inference of Large Language Models", 'published_date':
'2024-07-22', 'authors': ['Georgy Tyukin', 'Gbetondji J-S Dovonon', 'Jean
@@ -85,7 +85,7 @@
print(str(response.info['tool_calls'])[:1000])
'''
===============================================================================
[FunctionCallingRecord(func_name='download_papers', args={'query': 'attention
[ToolCallingRecord(func_name='download_papers', args={'query': 'attention
is all you need', 'output_dir': '/Users/enrei/Desktop/camel0826/camel/examples/
tool_call', 'paper_ids': ['2407.15516v1', '2107.08000v1', '2306.01926v1',
'2112.05993v1', '1912.11959v2']}, result='papers downloaded successfully')]
8 changes: 4 additions & 4 deletions examples/toolkits/google_scholar_toolkit.py
@@ -58,7 +58,7 @@
print(str(response.info['tool_calls'])[:1000])
"""
===============================================================================
[FunctionCallingRecord(func_name='get_author_detailed_info', args={}, result=
[ToolCallingRecord(func_name='get_author_detailed_info', args={}, result=
{'container_type': 'Author', 'filled': ['basics', 'indices', 'counts',
'coauthors', 'publications', 'public_access'], 'scholar_id': 'JicYPdAAAAAJ',
'source': <AuthorSource.AUTHOR_PROFILE_PAGE: 'AUTHOR_PROFILE_PAGE'>, 'name':
@@ -98,7 +98,7 @@
print(str(response.info['tool_calls'])[:1000])
"""
===============================================================================
[FunctionCallingRecord(func_name='get_author_publications', args={}, result=
[ToolCallingRecord(func_name='get_author_publications', args={}, result=
['Imagenet classification with deep convolutional neural networks', 'Deep
learning', 'Learning internal representations by error-propagation', 'Dropout:
a simple way to prevent neural networks from overfitting', 'Visualizing data
@@ -127,7 +127,7 @@
print(response.info['tool_calls'])
"""
===============================================================================
[FunctionCallingRecord(func_name='get_publication_by_title', args=
[ToolCallingRecord(func_name='get_publication_by_title', args=
{'publication_title': 'Camel: Communicative agents for" mind" exploration of
large language model society'}, result={'container_type': 'Publication',
'source': <PublicationSource.AUTHOR_PUBLICATION_ENTRY:
@@ -175,7 +175,7 @@
print((response.info['tool_calls'])[:1000])
"""
===============================================================================
[FunctionCallingRecord(func_name='get_full_paper_content_by_link', args=
[ToolCallingRecord(func_name='get_full_paper_content_by_link', args=
{'pdf_url': 'https://hal.science/hal-04206682/document'}, result='Deep
learning\nYann Lecun, Yoshua Bengio, Geoffrey Hinton\n\nTo cite this
version:\n\nYann Lecun, Yoshua Bengio, Geoffrey Hinton. Deep learning. Nature,
6 changes: 3 additions & 3 deletions examples/toolkits/notion_toolkit.py
@@ -55,7 +55,7 @@
print(str(response.info['tool_calls'])[:1000])
"""
==========================================================================
[FunctionCallingRecord(func_name='list_all_pages', args={}, result=[{'id':
[ToolCallingRecord(func_name='list_all_pages', args={}, result=[{'id':
'12684f56-4caa-8080-be91-d7fb1a5834e3', 'title': 'test page'},
{'id': '47a4fb54-e34b-4b45-9928-aa2802982eb8', 'title': 'Aigentbot'}])]
"""
@@ -71,7 +71,7 @@
print(str(response.info['tool_calls'])[:1000])
"""
==========================================================================
[FunctionCallingRecord(func_name='get_notion_block_text_content', args=
[ToolCallingRecord(func_name='get_notion_block_text_content', args=
{'block_id': '12684f56-4caa-8080-be91-d7fb1a5834e3'}, result='hellonihao
buhao this is a test par [Needs case added] another par [Needs case added]
A cute cat: https://www.google.com/imgres?q=cat&imgurl=https%3A%2F%2Fi.
@@ -92,7 +92,7 @@
print(str(response.info['tool_calls'])[:1000])
"""
==========================================================================
[FunctionCallingRecord(func_name='list_all_users', args={}, result=[{'type':
[ToolCallingRecord(func_name='list_all_users', args={}, result=[{'type':
'person', 'name': 'user a', 'workspace': ''}, {'type': 'bot', 'name':
'test', 'workspace': "user a's Notion"}])]
==========================================================================
2 changes: 1 addition & 1 deletion examples/toolkits/openapi_toolkit.py
@@ -53,7 +53,7 @@
print(response.info['tool_calls'])
"""
===============================================================================
[FunctionCallingRecord(func_name='klarna_productsUsingGET', args={
[ToolCallingRecord(func_name='klarna_productsUsingGET', args={
'q_in_query': 'basketball'}, result={'products': [{'name': 'Wilson Evolution'
, 'url': 'https://www.klarna.com/us/shopping/pl/cl1220/3203801266/Basketball
/Wilson-Evolution/?utm_source=openai&ref-site=openai_plugin', 'price':