@@ -2,7 +2,7 @@ import { BaseLanguageModel } from 'langchain/base_language'
 import { ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
 import { ConversationalRetrievalQAChain } from 'langchain/chains'
-import { AIChatMessage, BaseRetriever, HumanChatMessage } from 'langchain/schema'
+import { AIMessage, BaseRetriever, HumanMessage } from 'langchain/schema'
 import { BaseChatMemory, BufferMemory, ChatMessageHistory } from 'langchain/memory'
 import { PromptTemplate } from 'langchain/prompts'
 
8
@@ -20,6 +20,20 @@ const qa_template = `Use the following pieces of context to answer the question
 Question: {question}
 Helpful Answer:`
 
+const CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT = `Given the following conversation and a follow up question, return the conversation history excerpt that includes any relevant context to the question if it exists and rephrase the follow up question to be a standalone question.
+Chat History:
+{chat_history}
+Follow Up Input: {question}
+Your answer should follow the following format:
+\`\`\`
+Use the following pieces of context to answer the users question.
+If you don't know the answer, just say that you don't know, don't try to make up an answer.
+----------------
+<Relevant chat history excerpt as context here>
+Standalone question: <Rephrased question here>
+\`\`\`
+Your answer:`
+
 class ConversationalRetrievalQAChain_Chains implements INode {
     label: string
     name: string
@@ -49,6 +63,13 @@ class ConversationalRetrievalQAChain_Chains implements INode {
                 name: 'vectorStoreRetriever',
                 type: 'BaseRetriever'
             },
+            {
+                label: 'Memory',
+                name: 'memory',
+                type: 'DynamoDBChatMemory | RedisBackedChatMemory | ZepMemory',
+                optional: true,
+                description: 'If no memory connected, default BufferMemory will be used'
+            },
             {
                 label: 'Return Source Documents',
                 name: 'returnSourceDocuments',
@@ -99,22 +120,33 @@ class ConversationalRetrievalQAChain_Chains implements INode {
         const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
         const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
         const chainOption = nodeData.inputs?.chainOption as string
+        const memory = nodeData.inputs?.memory
 
         const obj: any = {
             verbose: process.env.DEBUG === 'true' ? true : false,
             qaChainOptions: {
                 type: 'stuff',
                 prompt: PromptTemplate.fromTemplate(systemMessagePrompt ? `${systemMessagePrompt}\n${qa_template}` : default_qa_template)
             },
-            memory: new BufferMemory({
+            questionGeneratorChainOptions: {
+                template: CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT
+            }
+        }
+        if (returnSourceDocuments) obj.returnSourceDocuments = returnSourceDocuments
+        if (chainOption) obj.qaChainOptions = { ...obj.qaChainOptions, type: chainOption }
+        if (memory) {
+            memory.inputKey = 'question'
+            memory.outputKey = 'text'
+            memory.memoryKey = 'chat_history'
+            obj.memory = memory
+        } else {
+            obj.memory = new BufferMemory({
                 memoryKey: 'chat_history',
                 inputKey: 'question',
                 outputKey: 'text',
                 returnMessages: true
             })
         }
-        if (returnSourceDocuments) obj.returnSourceDocuments = returnSourceDocuments
-        if (chainOption) obj.qaChainOptions = { ...obj.qaChainOptions, type: chainOption }
 
         const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStoreRetriever, obj)
         return chain
@@ -123,6 +155,8 @@ class ConversationalRetrievalQAChain_Chains implements INode {
     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const chain = nodeData.instance as ConversationalRetrievalQAChain
         const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
+        const memory = nodeData.inputs?.memory
+
         let model = nodeData.inputs?.model
 
         // Temporary fix: https://github.com/hwchase17/langchainjs/issues/754
@@ -131,16 +165,17 @@ class ConversationalRetrievalQAChain_Chains implements INode {
 
         const obj = { question: input }
 
-        if (chain.memory && options && options.chatHistory) {
+        // If external memory like Zep, Redis is being used, ignore below
+        if (!memory && chain.memory && options && options.chatHistory) {
             const chatHistory = []
             const histories: IMessage[] = options.chatHistory
             const memory = chain.memory as BaseChatMemory
 
             for (const message of histories) {
                 if (message.type === 'apiMessage') {
-                    chatHistory.push(new AIChatMessage(message.message))
+                    chatHistory.push(new AIMessage(message.message))
                 } else if (message.type === 'userMessage') {
-                    chatHistory.push(new HumanChatMessage(message.message))
+                    chatHistory.push(new HumanMessage(message.message))
                 }
             }
             memory.chatHistory = new ChatMessageHistory(chatHistory)