@@ -2,8 +2,15 @@ import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { BaseChatModel } from 'langchain/chat_models/base'
import { AutoGPT } from 'langchain/experimental/autogpt'
import { Tool } from 'langchain/tools'
+ import { AIMessage, HumanMessage, SystemMessage } from 'langchain/schema'
import { VectorStoreRetriever } from 'langchain/vectorstores/base'
import { flatten } from 'lodash'
+ import { StructuredTool } from 'langchain/tools'
+ import { LLMChain } from 'langchain/chains'
+ import { PromptTemplate } from 'langchain/prompts'
+
+ type ObjectTool = StructuredTool
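+ // Name of the command AutoGPT emits when it decides its goals are complete; checked in the overridden run loop below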
+ const FINISH_NAME = 'finish'

class AutoGPT_Agents implements INode {
    label: string
@@ -88,13 +95,107 @@ class AutoGPT_Agents implements INode {

    async run(nodeData: INodeData, input: string): Promise<string> {
        const executor = nodeData.instance as AutoGPT
+         const model = nodeData.inputs?.model as BaseChatModel
+
        try {
+             let totalAssistantReply = ''
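+             // Override AutoGPT's run loop so every intermediate assistant reply is captured in totalAssistantReply
+             // and can be returned to the UI as the agent's chain of thoughts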
+             executor.run = async (goals: string[]): Promise<string | undefined> => {
+                 const user_input = 'Determine which next command to use, and respond using the format specified above:'
+                 let loopCount = 0
+                 while (loopCount < executor.maxIterations) {
+                     loopCount += 1
+
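+                     // Ask the model for the next command, given the goals, vector-store memory and full message history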
+                     const { text: assistantReply } = await executor.chain.call({
+                         goals,
+                         user_input,
+                         memory: executor.memory,
+                         messages: executor.fullMessageHistory
+                     })
+
+                     // eslint-disable-next-line no-console
+                     console.log('\x1b[92m\x1b[1m\n*****AutoGPT*****\n\x1b[0m\x1b[0m')
+                     // eslint-disable-next-line no-console
+                     console.log(assistantReply)
+                     totalAssistantReply += assistantReply + '\n'
+                     executor.fullMessageHistory.push(new HumanMessage(user_input))
+                     executor.fullMessageHistory.push(new AIMessage(assistantReply))
+
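+                     // Parse the reply into a structured action and dispatch it to the matching tool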
+                     const action = await executor.outputParser.parse(assistantReply)
+                     const tools = executor.tools.reduce((acc, tool) => ({ ...acc, [tool.name]: tool }), {} as { [key: string]: ObjectTool })
+                     if (action.name === FINISH_NAME) {
+                         return action.args.response
+                     }
+                     let result: string
+                     if (action.name in tools) {
+                         const tool = tools[action.name]
+                         let observation
+                         try {
+                             observation = await tool.call(action.args)
+                         } catch (e) {
+                             observation = `Error in args: ${e}`
+                         }
+                         result = `Command ${tool.name} returned: ${observation}`
+                     } else if (action.name === 'ERROR') {
+                         result = `Error: ${action.args}. `
+                     } else {
+                         result = `Unknown command '${action.name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format.`
+                     }
+
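+                     // Persist this iteration (reply + tool result) to the vector-store memory and the message history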
+                     let memoryToAdd = `Assistant Reply: ${assistantReply}\nResult: ${result}`
+                     if (executor.feedbackTool) {
+                         const feedback = `\n${await executor.feedbackTool.call('Input: ')}`
+                         if (feedback === 'q' || feedback === 'stop') {
+                             return 'EXITING'
+                         }
+                         memoryToAdd += feedback
+                     }
+
+                     const documents = await executor.textSplitter.createDocuments([memoryToAdd])
+                     await executor.memory.addDocuments(documents)
+                     executor.fullMessageHistory.push(new SystemMessage(result))
+                 }
+
+                 return undefined
+             }
+
            const res = await executor.run([input])
-             return res || 'I have completed all my tasks.'
+
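+             // No result means the loop hit maxIterations without a 'finish' command; surface the chain of thoughts instead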
+             if (!res) {
+                 const sentence = `Unfortunately I was not able to complete all the tasks. Here is the chain of thoughts:`
+                 return `${await rephraseString(sentence, model)}\n\`\`\`javascript\n${totalAssistantReply}\n\`\`\`\n`
+             }
+
+             const sentence = `I have completed all my tasks. Here is the chain of thoughts:`
+             let writeFilePath = ''
+             const writeTool = executor.tools.find((tool) => tool.name === 'write_file')
+             if (executor.tools.length && writeTool) {
+                 writeFilePath = (writeTool as any).store.basePath
+             }
+             return `${await rephraseString(
+                 sentence,
+                 model
+             )}\n\`\`\`javascript\n${totalAssistantReply}\n\`\`\`\nAnd the final result:\n\`\`\`javascript\n${res}\n\`\`\`\n${
+                 writeFilePath
+                     ? await rephraseString(
+                           `You can download the final result displayed above, or see if a new file has been successfully written to \`${writeFilePath}\``,
+                           model
+                       )
+                     : ''
+             }`
        } catch (e) {
            throw new Error(e)
        }
    }
}

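+ // Helper that asks the connected chat model to reword a fixed status sentence before it is returned to the user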
+ const rephraseString = async (sentence: string, model: BaseChatModel) => {
+     const promptTemplate = new PromptTemplate({
+         template: 'You are a helpful Assistant that rephrases a sentence: {sentence}',
+         inputVariables: ['sentence']
+     })
+     const chain = new LLMChain({ llm: model, prompt: promptTemplate })
+     const res = await chain.call({ sentence })
+     return res?.text
+ }
+
module.exports = { nodeClass: AutoGPT_Agents }