Skip to content

Commit c0672f7

Browse files
authored
Merge pull request #852 from FlowiseAI/feature/AutoGPT
Feature/Update AutoGPT
2 parents 0423fc2 + a4f9b75 commit c0672f7

File tree

2 files changed

+103
-2
lines changed

2 files changed

+103
-2
lines changed

packages/components/nodes/agents/AutoGPT/AutoGPT.ts

+102-1
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,15 @@ import { INode, INodeData, INodeParams } from '../../../src/Interface'
22
import { BaseChatModel } from 'langchain/chat_models/base'
33
import { AutoGPT } from 'langchain/experimental/autogpt'
44
import { Tool } from 'langchain/tools'
5+
import { AIMessage, HumanMessage, SystemMessage } from 'langchain/schema'
56
import { VectorStoreRetriever } from 'langchain/vectorstores/base'
67
import { flatten } from 'lodash'
8+
import { StructuredTool } from 'langchain/tools'
9+
import { LLMChain } from 'langchain/chains'
10+
import { PromptTemplate } from 'langchain/prompts'
11+
12+
type ObjectTool = StructuredTool
13+
const FINISH_NAME = 'finish'
714

815
class AutoGPT_Agents implements INode {
916
label: string
@@ -88,13 +95,107 @@ class AutoGPT_Agents implements INode {
8895

8996
async run(nodeData: INodeData, input: string): Promise<string> {
9097
const executor = nodeData.instance as AutoGPT
98+
const model = nodeData.inputs?.model as BaseChatModel
99+
91100
try {
101+
let totalAssistantReply = ''
102+
executor.run = async (goals: string[]): Promise<string | undefined> => {
103+
const user_input = 'Determine which next command to use, and respond using the format specified above:'
104+
let loopCount = 0
105+
while (loopCount < executor.maxIterations) {
106+
loopCount += 1
107+
108+
const { text: assistantReply } = await executor.chain.call({
109+
goals,
110+
user_input,
111+
memory: executor.memory,
112+
messages: executor.fullMessageHistory
113+
})
114+
115+
// eslint-disable-next-line no-console
116+
console.log('\x1b[92m\x1b[1m\n*****AutoGPT*****\n\x1b[0m\x1b[0m')
117+
// eslint-disable-next-line no-console
118+
console.log(assistantReply)
119+
totalAssistantReply += assistantReply + '\n'
120+
executor.fullMessageHistory.push(new HumanMessage(user_input))
121+
executor.fullMessageHistory.push(new AIMessage(assistantReply))
122+
123+
const action = await executor.outputParser.parse(assistantReply)
124+
const tools = executor.tools.reduce((acc, tool) => ({ ...acc, [tool.name]: tool }), {} as { [key: string]: ObjectTool })
125+
if (action.name === FINISH_NAME) {
126+
return action.args.response
127+
}
128+
let result: string
129+
if (action.name in tools) {
130+
const tool = tools[action.name]
131+
let observation
132+
try {
133+
observation = await tool.call(action.args)
134+
} catch (e) {
135+
observation = `Error in args: ${e}`
136+
}
137+
result = `Command ${tool.name} returned: ${observation}`
138+
} else if (action.name === 'ERROR') {
139+
result = `Error: ${action.args}. `
140+
} else {
141+
result = `Unknown command '${action.name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format.`
142+
}
143+
144+
let memoryToAdd = `Assistant Reply: ${assistantReply}\nResult: ${result} `
145+
if (executor.feedbackTool) {
146+
const feedback = `\n${await executor.feedbackTool.call('Input: ')}`
147+
if (feedback === 'q' || feedback === 'stop') {
148+
return 'EXITING'
149+
}
150+
memoryToAdd += feedback
151+
}
152+
153+
const documents = await executor.textSplitter.createDocuments([memoryToAdd])
154+
await executor.memory.addDocuments(documents)
155+
executor.fullMessageHistory.push(new SystemMessage(result))
156+
}
157+
158+
return undefined
159+
}
160+
92161
const res = await executor.run([input])
93-
return res || 'I have completed all my tasks.'
162+
163+
if (!res) {
164+
const sentence = `Unfortunately I was not able to complete all the task. Here is the chain of thoughts:`
165+
return `${await rephraseString(sentence, model)}\n\`\`\`javascript\n${totalAssistantReply}\n\`\`\`\n`
166+
}
167+
168+
const sentence = `I have completed all my tasks. Here is the chain of thoughts:`
169+
let writeFilePath = ''
170+
const writeTool = executor.tools.find((tool) => tool.name === 'write_file')
171+
if (executor.tools.length && writeTool) {
172+
writeFilePath = (writeTool as any).store.basePath
173+
}
174+
return `${await rephraseString(
175+
sentence,
176+
model
177+
)}\n\`\`\`javascript\n${totalAssistantReply}\n\`\`\`\nAnd the final result:\n\`\`\`javascript\n${res}\n\`\`\`\n${
178+
writeFilePath
179+
? await rephraseString(
180+
`You can download the final result displayed above, or see if a new file has been successfully written to \`${writeFilePath}\``,
181+
model
182+
)
183+
: ''
184+
}`
94185
} catch (e) {
95186
throw new Error(e)
96187
}
97188
}
98189
}
99190

191+
const rephraseString = async (sentence: string, model: BaseChatModel) => {
192+
const promptTemplate = new PromptTemplate({
193+
template: 'You are a helpful Assistant that rephrase a sentence: {sentence}',
194+
inputVariables: ['sentence']
195+
})
196+
const chain = new LLMChain({ llm: model, prompt: promptTemplate })
197+
const res = await chain.call({ sentence })
198+
return res?.text
199+
}
200+
100201
module.exports = { nodeClass: AutoGPT_Agents }

packages/server/src/utils/index.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -481,7 +481,7 @@ export const isStartNodeDependOnInput = (startingNodes: IReactFlowNode[], nodes:
481481
if (inputVariables.length > 0) return true
482482
}
483483
}
484-
const whitelistNodeNames = ['vectorStoreToDocument']
484+
const whitelistNodeNames = ['vectorStoreToDocument', 'autoGPT']
485485
for (const node of nodes) {
486486
if (whitelistNodeNames.includes(node.data.name)) return true
487487
}

0 commit comments

Comments
 (0)