-
Notifications
You must be signed in to change notification settings - Fork 2.9k
Add MCP support #3672
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Add MCP support #3672
Changes from 2 commits
3cb4891
8dc0215
6c530bc
940b349
fb6392d
d46e27e
5f3942b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -17,9 +17,10 @@ | |
from datetime import datetime | ||
from json.decoder import JSONDecodeError | ||
from pathlib import Path | ||
from typing import List | ||
from typing import List, Optional, Tuple | ||
|
||
from aider import __version__, models, prompts, urls, utils | ||
from aider.mcp import is_mcp_enabled, get_available_tools_prompt, process_llm_tool_requests, stop_mcp_servers | ||
from aider.analytics import Analytics | ||
from aider.commands import Commands | ||
from aider.exceptions import LiteLLMExceptions | ||
|
@@ -957,6 +958,7 @@ def keyboard_interrupt(self): | |
if self.last_keyboard_interrupt and now - self.last_keyboard_interrupt < thresh: | ||
self.io.tool_warning("\n\n^C KeyboardInterrupt") | ||
self.event("exit", reason="Control-C") | ||
stop_mcp_servers() | ||
sys.exit() | ||
|
||
self.io.tool_warning("\n\n^C again to exit") | ||
|
@@ -1106,6 +1108,11 @@ def fmt_system_prompt(self, prompt): | |
) | ||
else: | ||
quad_backtick_reminder = "" | ||
|
||
# Add MCP tools information if MCP is enabled | ||
mcp_tools_info = "" | ||
if is_mcp_enabled(): | ||
mcp_tools_info = self.gpt_prompts.mcp_tools_prefix + "\n\n" + get_available_tools_prompt() | ||
|
||
prompt = prompt.format( | ||
fence=self.fence, | ||
|
@@ -1119,6 +1126,10 @@ def fmt_system_prompt(self, prompt): | |
|
||
if self.main_model.system_prompt_prefix: | ||
prompt = self.main_model.system_prompt_prefix + prompt | ||
|
||
# Append MCP tools information to the end of the prompt | ||
if mcp_tools_info: | ||
prompt += "\n\n" + mcp_tools_info | ||
|
||
return prompt | ||
|
||
|
@@ -1455,6 +1466,14 @@ def send_message(self, inp): | |
self.reflected_message = add_rel_files_message | ||
return | ||
|
||
tool_results = self.check_for_tool_calls(content) | ||
for tool_result in tool_results: | ||
if self.reflected_message: | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. As already pointed out by others: max_reflections is the limit for consecutive tool calls. while message:
self.reflected_message = None
list(self.send_message(message))
if not self.reflected_message:
break
if self.num_reflections >= self.max_reflections:
self.io.tool_warning(f"Only {self.max_reflections} reflections allowed, stopping.")
return
self.num_reflections += 1
message = self.reflected_message There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. What would be the correct way to implement that? Since it behaves almost exactly like a diff edit/a file inclusion, There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I would say you could have another field while message:
self.reflected_message = None
list(self.send_message(message))
if not self.reflected_message and not self.mcp_tool_result_message:
break
if self.num_reflections >= self.max_reflections:
self.io.tool_warning(f"Only {self.max_reflections} reflections allowed, stopping.")
return
if self.num_mcp_iterations >= self.max_mcp_iterations:
self.io.tool_warning(f"Only {self.max_mcp_iterations} MCP iterations allowed, stopping.")
return
if self.reflected_message:
self.num_reflections += 1
message = self.reflected_message
if self.mcp_tool_result_message:
self.num_mcp_iterations += 1
message = self.mcp_tool_result_message There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Thanks, it seems to work; maybe I'll add the max_mcp_iterations to the configuration too |
||
self.reflected_message += "\n\n" + tool_result | ||
else: | ||
self.reflected_message = tool_result | ||
return | ||
|
||
try: | ||
if self.reply_completed(): | ||
return | ||
|
@@ -1647,6 +1666,13 @@ def get_file_mentions(self, content, ignore_current=False): | |
|
||
return mentioned_rel_fnames | ||
|
||
def check_for_tool_calls(self, content): | ||
"""Process the LLM's response after it's completed.""" | ||
if is_mcp_enabled(): | ||
return process_llm_tool_requests(content, self.io) | ||
else: | ||
return [] | ||
|
||
def check_for_file_mentions(self, content): | ||
mentioned_rel_fnames = self.get_file_mentions(content) | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The two sections could be combined into one, right?