Skip to content

Commit 99e27e2

Browse files
liuxukun2000
and
Satan
authored
Fix React (#41)
* add log mode, refine doc * fix react --------- Co-authored-by: Satan <liuxk2019@mail.sustech.edu.cn>
1 parent b3f9b17 commit 99e27e2

File tree

3 files changed

+11
-9
lines changed

3 files changed

+11
-9
lines changed

Makefile

+1-1
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
# from the environment for the first two.
66
SPHINXOPTS ?=
77
SPHINXBUILD ?= sphinx-build
8-
SOURCEDIR = source
8+
SOURCEDIR = doc/source
99
BUILDDIR = build
1010

1111
# Put it first so that "make" without argument is like "make help".

gentopia/agent/react/agent.py

+5-6
Original file line numberDiff line numberDiff line change
@@ -148,6 +148,7 @@ def run(self, instruction, max_iterations=10):
148148
:return: AgentOutput object.
149149
:rtype: AgentOutput
150150
"""
151+
self.intermediate_steps.clear()
151152
logging.info(f"Running {self.name + ':' + self.version} with instruction: {instruction}")
152153
total_cost = 0.0
153154
total_token = 0
@@ -158,8 +159,8 @@ def run(self, instruction, max_iterations=10):
158159
logging.info(f"Prompt: {prompt}")
159160
response = self.llm.completion(prompt, stop=["Observation:"])
160161
if response.state == "error":
161-
print("Planner failed to retrieve response from LLM")
162-
raise ValueError("Planner failed to retrieve response from LLM")
162+
print("Failed to retrieve response from LLM")
163+
raise ValueError("Failed to retrieve response from LLM")
163164

164165
logging.info(f"Response: {response.content}")
165166
total_cost += calculate_cost(self.llm.model_name, response.prompt_token,
@@ -180,7 +181,7 @@ def run(self, instruction, max_iterations=10):
180181
self.intermediate_steps[-1].append(result)
181182
return AgentOutput(output=response.content, cost=total_cost, token_usage=total_token)
182183

183-
def stream(self, instruction: Optional[str] = None, output: Optional[BaseOutput] = None):
184+
def stream(self, instruction: Optional[str] = None, output: Optional[BaseOutput] = None, max_iterations: int = 10):
184185
"""
185186
Stream output the agent with the given instruction.
186187
@@ -197,7 +198,7 @@ def stream(self, instruction: Optional[str] = None, output: Optional[BaseOutput]
197198
if output is None:
198199
output = BaseOutput()
199200
output.thinking(self.name)
200-
for _ in range(10):
201+
for _ in range(max_iterations):
201202

202203
prompt = self._compose_prompt(instruction)
203204
logging.info(f"Prompt: {prompt}")
@@ -208,8 +209,6 @@ def stream(self, instruction: Optional[str] = None, output: Optional[BaseOutput]
208209
for i in response:
209210
content += i.content
210211
output.panel_print(i.content, self.name, True)
211-
212-
# print(i.content)
213212
output.clear()
214213

215214
logging.info(f"Response: {content}")

test.py

+5-2
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,11 @@
1616
enable_log(log_level='debug')
1717
dotenv.load_dotenv(".env")
1818

19-
assembler = AgentAssembler(file='configs/memory.yaml')
19+
assembler = AgentAssembler(file='configs/react.yaml')
2020

2121
# # assembler.manager = LocalLLMManager()
2222
agent = assembler.get_agent()
23-
chat(agent)
23+
24+
# chat(agent)
25+
print(agent.run("1+sqrt(33)=?"))
26+
print(agent.run("1+sqrt(35)=?"))

0 commit comments

Comments
 (0)