change agent/chain with memory to use runnable

Authored by Henry on 2024-01-08 13:02:56 +00:00; committed by Ilango
parent e104af4346
commit f66c03ab0a
38 changed files with 1752 additions and 1394 deletions

View File

@@ -1,11 +1,14 @@
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { initializeAgentExecutorWithOptions, AgentExecutor, InitializeAgentExecutorOptions } from 'langchain/agents'
 import { Tool } from 'langchain/tools'
-import { BaseChatMemory } from 'langchain/memory'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
 import { BaseChatModel } from 'langchain/chat_models/base'
 import { flatten } from 'lodash'
-import { additionalCallbacks } from '../../../src/handler'
+import { AgentStep, BaseMessage, ChainValues, AIMessage, HumanMessage } from 'langchain/schema'
+import { RunnableSequence } from 'langchain/schema/runnable'
+import { getBaseClasses } from '../../../src/utils'
+import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
+import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { AgentExecutor } from '../../../src/agents'
+import { ChatConversationalAgent } from 'langchain/agents'
+import { renderTemplate } from '@langchain/core/prompts'

 const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
@@ -15,6 +18,15 @@ Assistant is constantly learning and improving, and its capabilities are constan
 Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.`

+const TEMPLATE_TOOL_RESPONSE = `TOOL RESPONSE:
+---------------------
+{observation}
+
+USER'S INPUT
+--------------------
+
+Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else.`
+
 class ConversationalAgent_Agents implements INode {
     label: string
     name: string
@@ -25,8 +37,9 @@ class ConversationalAgent_Agents implements INode {
     category: string
     baseClasses: string[]
     inputs: INodeParams[]
+    sessionId?: string

-    constructor() {
+    constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversational Agent'
         this.name = 'conversationalAgent'
         this.version = 2.0
@@ -43,7 +56,7 @@ class ConversationalAgent_Agents implements INode {
                 list: true
             },
             {
-                label: 'Language Model',
+                label: 'Chat Model',
                 name: 'model',
                 type: 'BaseChatModel'
             },
@@ -62,52 +75,114 @@ class ConversationalAgent_Agents implements INode {
                 additionalParams: true
             }
         ]
+        this.sessionId = fields?.sessionId
     }

-    async init(nodeData: INodeData): Promise<any> {
-        const model = nodeData.inputs?.model as BaseChatModel
-        let tools = nodeData.inputs?.tools as Tool[]
-        tools = flatten(tools)
-        const memory = nodeData.inputs?.memory as BaseChatMemory
-        const systemMessage = nodeData.inputs?.systemMessage as string
-
-        const obj: InitializeAgentExecutorOptions = {
-            agentType: 'chat-conversational-react-description',
-            verbose: process.env.DEBUG === 'true' ? true : false
-        }
-
-        const agentArgs: any = {}
-        if (systemMessage) {
-            agentArgs.systemMessage = systemMessage
-        }
-
-        if (Object.keys(agentArgs).length) obj.agentArgs = agentArgs
-
-        const executor = await initializeAgentExecutorWithOptions(tools, model, obj)
-        executor.memory = memory
-        return executor
+    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
+        return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
     }

     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
-        const executor = nodeData.instance as AgentExecutor
-        const memory = nodeData.inputs?.memory as BaseChatMemory
-
-        if (options && options.chatHistory) {
-            const chatHistoryClassName = memory.chatHistory.constructor.name
-            // Only replace when its In-Memory
-            if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
-                memory.chatHistory = mapChatHistory(options)
-                executor.memory = memory
-            }
-        }
-
-        ;(executor.memory as any).returnMessages = true // Return true for BaseChatModel
-
+        const memory = nodeData.inputs?.memory as FlowiseMemory
+        const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
+
+        const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const callbacks = await additionalCallbacks(nodeData, options)
-        const result = await executor.call({ input }, [...callbacks])
-        return result?.output
+        let res: ChainValues = {}
+
+        if (options.socketIO && options.socketIOClientId) {
+            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
+            res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
+        } else {
+            res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
+        }
+
+        await memory.addChatMessages(
+            [
+                {
+                    text: input,
+                    type: 'userMessage'
+                },
+                {
+                    text: res?.output,
+                    type: 'apiMessage'
+                }
+            ],
+            this.sessionId
+        )
+
+        return res?.output
     }
 }

+const prepareAgent = async (
+    nodeData: INodeData,
+    flowObj: { sessionId?: string; chatId?: string; input?: string },
+    chatHistory: IMessage[] = []
+) => {
+    const model = nodeData.inputs?.model as BaseChatModel
+    let tools = nodeData.inputs?.tools as Tool[]
+    tools = flatten(tools)
+    const memory = nodeData.inputs?.memory as FlowiseMemory
+    const systemMessage = nodeData.inputs?.systemMessage as string
+    const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
+    const inputKey = memory.inputKey ? memory.inputKey : 'input'
+
+    /** Bind a stop token to the model */
+    const modelWithStop = model.bind({
+        stop: ['\nObservation']
+    })
+
+    const outputParser = ChatConversationalAgent.getDefaultOutputParser({
+        llm: model,
+        toolNames: tools.map((tool) => tool.name)
+    })
+
+    const prompt = ChatConversationalAgent.createPrompt(tools, {
+        systemMessage: systemMessage ? systemMessage : DEFAULT_PREFIX,
+        outputParser
+    })
+
+    const runnableAgent = RunnableSequence.from([
+        {
+            [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
+            agent_scratchpad: async (i: { input: string; steps: AgentStep[] }) => await constructScratchPad(i.steps),
+            [memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
+                const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
+                return messages ?? []
+            }
+        },
+        prompt,
+        modelWithStop,
+        outputParser
+    ])
+
+    const executor = AgentExecutor.fromAgentAndTools({
+        agent: runnableAgent,
+        tools,
+        sessionId: flowObj?.sessionId,
+        chatId: flowObj?.chatId,
+        input: flowObj?.input,
+        verbose: process.env.DEBUG === 'true' ? true : false
+    })
+    return executor
+}
+
+const constructScratchPad = async (steps: AgentStep[]): Promise<BaseMessage[]> => {
+    const thoughts: BaseMessage[] = []
+    for (const step of steps) {
+        thoughts.push(new AIMessage(step.action.log))
+        thoughts.push(
+            new HumanMessage(
+                renderTemplate(TEMPLATE_TOOL_RESPONSE, 'f-string', {
+                    observation: step.observation
+                })
+            )
+        )
+    }
+    return thoughts
+}

 module.exports = { nodeClass: ConversationalAgent_Agents }
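
The heart of this refactor is visible in `prepareAgent`: instead of attaching memory to the executor via `initializeAgentExecutorWithOptions`, the agent becomes a `RunnableSequence` whose first element is a map of prompt variables, so chat history is pulled from memory lazily on every invoke. A minimal, self-contained sketch of that pattern outside the Flowise node machinery (`SimpleMemory` is a hypothetical stand-in for `FlowiseMemory`, not part of the codebase):

```typescript
import { RunnableSequence } from 'langchain/schema/runnable'
import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
import { StringOutputParser } from 'langchain/schema/output_parser'
import { ChatOpenAI } from 'langchain/chat_models/openai'
import { BaseMessage } from 'langchain/schema'

// Hypothetical stand-in for FlowiseMemory.getChatMessages
interface SimpleMemory {
    getMessages(sessionId?: string): Promise<BaseMessage[]>
}

const buildConversationalRunnable = (model: ChatOpenAI, memory: SimpleMemory, sessionId?: string) =>
    RunnableSequence.from([
        {
            // Map step: each key becomes a prompt variable, resolved per call
            input: (i: { input: string }) => i.input,
            chat_history: async () => await memory.getMessages(sessionId)
        },
        ChatPromptTemplate.fromMessages([
            ['system', 'You are a helpful assistant.'],
            new MessagesPlaceholder('chat_history'),
            ['human', '{input}']
        ]),
        model,
        new StringOutputParser()
    ])

// Usage: const answer = await buildConversationalRunnable(model, memory, 'abc').invoke({ input: 'hi' })
```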

View File

@@ -1,9 +1,14 @@
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
+import { ChainValues, AgentStep, BaseMessage } from 'langchain/schema'
 import { flatten } from 'lodash'
-import { BaseChatMemory } from 'langchain/memory'
+import { ChatOpenAI } from 'langchain/chat_models/openai'
+import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
+import { formatToOpenAIFunction } from 'langchain/tools'
+import { RunnableSequence } from 'langchain/schema/runnable'
+import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
+import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
+import { AgentExecutor, formatAgentSteps } from '../../../src/agents'

 const defaultMessage = `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.`
@@ -17,8 +22,9 @@ class ConversationalRetrievalAgent_Agents implements INode {
     category: string
     baseClasses: string[]
     inputs: INodeParams[]
+    sessionId?: string

-    constructor() {
+    constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversational Retrieval Agent'
         this.name = 'conversationalRetrievalAgent'
         this.version = 3.0
@@ -54,55 +60,96 @@ class ConversationalRetrievalAgent_Agents implements INode {
                 additionalParams: true
             }
         ]
+        this.sessionId = fields?.sessionId
     }

-    async init(nodeData: INodeData): Promise<any> {
-        const model = nodeData.inputs?.model
-        const memory = nodeData.inputs?.memory as BaseChatMemory
-        const systemMessage = nodeData.inputs?.systemMessage as string
-        let tools = nodeData.inputs?.tools
-        tools = flatten(tools)
-
-        const executor = await initializeAgentExecutorWithOptions(tools, model, {
-            agentType: 'openai-functions',
-            verbose: process.env.DEBUG === 'true' ? true : false,
-            agentArgs: {
-                prefix: systemMessage ?? defaultMessage
-            },
-            returnIntermediateSteps: true
-        })
-        executor.memory = memory
-        return executor
+    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
+        return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
     }

     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
-        const executor = nodeData.instance as AgentExecutor
-
-        if (executor.memory) {
-            ;(executor.memory as any).memoryKey = 'chat_history'
-            ;(executor.memory as any).outputKey = 'output'
-            ;(executor.memory as any).returnMessages = true
-
-            const chatHistoryClassName = (executor.memory as any).chatHistory.constructor.name
-            // Only replace when its In-Memory
-            if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
-                ;(executor.memory as any).chatHistory = mapChatHistory(options)
-            }
-        }
-
+        const memory = nodeData.inputs?.memory as FlowiseMemory
+        const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
+
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const callbacks = await additionalCallbacks(nodeData, options)
+        let res: ChainValues = {}

         if (options.socketIO && options.socketIOClientId) {
             const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
-            const result = await executor.call({ input }, [loggerHandler, handler, ...callbacks])
-            return result?.output
+            res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
         } else {
-            const result = await executor.call({ input }, [loggerHandler, ...callbacks])
-            return result?.output
+            res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
         }
+
+        await memory.addChatMessages(
+            [
+                {
+                    text: input,
+                    type: 'userMessage'
+                },
+                {
+                    text: res?.output,
+                    type: 'apiMessage'
+                }
+            ],
+            this.sessionId
+        )
+
+        return res?.output
     }
 }

+const prepareAgent = (
+    nodeData: INodeData,
+    flowObj: { sessionId?: string; chatId?: string; input?: string },
+    chatHistory: IMessage[] = []
+) => {
+    const model = nodeData.inputs?.model as ChatOpenAI
+    const memory = nodeData.inputs?.memory as FlowiseMemory
+    const systemMessage = nodeData.inputs?.systemMessage as string
+    let tools = nodeData.inputs?.tools
+    tools = flatten(tools)
+    const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
+    const inputKey = memory.inputKey ? memory.inputKey : 'input'
+
+    const prompt = ChatPromptTemplate.fromMessages([
+        ['ai', systemMessage ? systemMessage : defaultMessage],
+        new MessagesPlaceholder(memoryKey),
+        ['human', `{${inputKey}}`],
+        new MessagesPlaceholder('agent_scratchpad')
+    ])
+
+    const modelWithFunctions = model.bind({
+        functions: [...tools.map((tool: any) => formatToOpenAIFunction(tool))]
+    })
+
+    const runnableAgent = RunnableSequence.from([
+        {
+            [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
+            agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
+            [memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
+                const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
+                return messages ?? []
+            }
+        },
+        prompt,
+        modelWithFunctions,
+        new OpenAIFunctionsAgentOutputParser()
+    ])
+
+    const executor = AgentExecutor.fromAgentAndTools({
+        agent: runnableAgent,
+        tools,
+        sessionId: flowObj?.sessionId,
+        chatId: flowObj?.chatId,
+        input: flowObj?.input,
+        returnIntermediateSteps: true,
+        verbose: process.env.DEBUG === 'true' ? true : false
+    })
+    return executor
+}

 module.exports = { nodeClass: ConversationalRetrievalAgent_Agents }
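
The functions-agent variant above relies on `formatToOpenAIFunction` to expose each tool's schema and on `model.bind()` to attach those schemas to every call, keeping the `prompt | model | parser` sequence itself stateless. A small sketch with a toy tool (names are illustrative, not from the repo):

```typescript
import { ChatOpenAI } from 'langchain/chat_models/openai'
import { DynamicTool, formatToOpenAIFunction } from 'langchain/tools'

// Toy tool; the real node collects tools from nodeData.inputs
const timeTool = new DynamicTool({
    name: 'current_time',
    description: 'Returns the current time as an ISO string',
    func: async () => new Date().toISOString()
})

const model = new ChatOpenAI({ modelName: 'gpt-3.5-turbo', temperature: 0 })

// bind() returns a new runnable that sends the function schemas on every
// call, so the same model instance can be reused elsewhere unmodified
const modelWithFunctions = model.bind({
    functions: [formatToOpenAIFunction(timeTool)]
})
```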

View File

@@ -96,45 +96,51 @@ class OpenAIAssistant_Agents implements INode {
         return null
     }

-    //@ts-ignore
-    memoryMethods = {
-        async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
-            const selectedAssistantId = nodeData.inputs?.selectedAssistant as string
-            const appDataSource = options.appDataSource as DataSource
-            const databaseEntities = options.databaseEntities as IDatabaseEntity
-            let sessionId = nodeData.inputs?.sessionId as string
-
-            const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
-                id: selectedAssistantId
-            })
-
-            if (!assistant) {
-                options.logger.error(`Assistant ${selectedAssistantId} not found`)
-                return
-            }
-
-            if (!sessionId && options.chatId) {
-                const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({
-                    chatId: options.chatId
-                })
-                if (!chatmsg) {
-                    options.logger.error(`Chat Message with Chat Id: ${options.chatId} not found`)
-                    return
-                }
-                sessionId = chatmsg.sessionId
-            }
-
-            const credentialData = await getCredentialData(assistant.credential ?? '', options)
-            const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
-            if (!openAIApiKey) {
-                options.logger.error(`OpenAI ApiKey not found`)
-                return
-            }
-
-            const openai = new OpenAI({ apiKey: openAIApiKey })
-            options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
+    async clearChatMessages(nodeData: INodeData, options: ICommonObject, sessionIdObj: { type: string; id: string }): Promise<void> {
+        const selectedAssistantId = nodeData.inputs?.selectedAssistant as string
+        const appDataSource = options.appDataSource as DataSource
+        const databaseEntities = options.databaseEntities as IDatabaseEntity
+
+        const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
+            id: selectedAssistantId
+        })
+
+        if (!assistant) {
+            options.logger.error(`Assistant ${selectedAssistantId} not found`)
+            return
+        }
+
+        if (!sessionIdObj) return
+
+        let sessionId = ''
+        if (sessionIdObj.type === 'chatId') {
+            const chatId = sessionIdObj.id
+            const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({
+                chatId
+            })
+            if (!chatmsg) {
+                options.logger.error(`Chat Message with Chat Id: ${chatId} not found`)
+                return
+            }
+            sessionId = chatmsg.sessionId
+        } else if (sessionIdObj.type === 'threadId') {
+            sessionId = sessionIdObj.id
+        }
+
+        const credentialData = await getCredentialData(assistant.credential ?? '', options)
+        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
+        if (!openAIApiKey) {
+            options.logger.error(`OpenAI ApiKey not found`)
+            return
+        }
+
+        const openai = new OpenAI({ apiKey: openAIApiKey })
+        options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
+        try {
             if (sessionId) await openai.beta.threads.del(sessionId)
             options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
+        } catch (e) {
+            throw new Error(e)
+        }
-        }
     }
@@ -297,7 +303,11 @@ class OpenAIAssistant_Agents implements INode {
                             options.socketIO.to(options.socketIOClientId).emit('tool', tool.name)
                         try {
-                            const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, threadId)
+                            const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, {
+                                sessionId: threadId,
+                                chatId: options.chatId,
+                                input
+                            })
                             await analyticHandlers.onToolEnd(toolIds, toolOutput)
                             submitToolOutputs.push({
                                 tool_call_id: actions[i].toolCallId,
@@ -462,6 +472,7 @@ class OpenAIAssistant_Agents implements INode {
         const imageRegex = /<img[^>]*\/>/g
         let llmOutput = returnVal.replace(imageRegex, '')
         llmOutput = llmOutput.replace('<br/>', '')
+
         await analyticHandlers.onLLMEnd(llmIds, llmOutput)
         await analyticHandlers.onChainEnd(parentIds, messageData, true)
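
The `tool.call` change above replaces the bare `threadId` string with a flow-context object. The base langchain `Tool.call` does not accept this fourth parameter; Flowise overrides `call()` in its own tool classes, so the following is only a hypothetical sketch of a tool consuming that shape:

```typescript
// Shape of the (Flowise-specific) flow context now threaded through tool calls
type FlowContext = { sessionId?: string; chatId?: string; input?: string }

class SessionAwareTool {
    name = 'sessionAwareTool'

    async call(toolInput: string, _callbacks?: unknown, _tags?: string[], flow?: FlowContext): Promise<string> {
        // A tool can now scope side effects (e.g. per-thread lookups) to the
        // OpenAI Assistant thread instead of receiving a bare sessionId string
        return `ran for session ${flow?.sessionId ?? 'unknown'} with input: ${toolInput}`
    }
}
```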

View File

@@ -1,17 +1,14 @@
-import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { AgentExecutor as LCAgentExecutor, AgentExecutorInput } from 'langchain/agents'
-import { ChainValues, AgentStep, AgentFinish, AgentAction, BaseMessage, FunctionMessage, AIMessage } from 'langchain/schema'
-import { OutputParserException } from 'langchain/schema/output_parser'
-import { CallbackManagerForChainRun } from 'langchain/callbacks'
-import { formatToOpenAIFunction } from 'langchain/tools'
-import { ToolInputParsingException, Tool } from '@langchain/core/tools'
+import { ChainValues, AgentStep, BaseMessage } from 'langchain/schema'
 import { getBaseClasses } from '../../../src/utils'
 import { flatten } from 'lodash'
 import { RunnableSequence } from 'langchain/schema/runnable'
+import { formatToOpenAIFunction } from 'langchain/tools'
+import { ChatOpenAI } from 'langchain/chat_models/openai'
+import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
-import { ChatOpenAI } from 'langchain/chat_models/openai'
 import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
+import { AgentExecutor, formatAgentSteps } from '../../../src/agents'

 class OpenAIFunctionAgent_Agents implements INode {
     label: string
@@ -25,7 +22,7 @@ class OpenAIFunctionAgent_Agents implements INode {
     inputs: INodeParams[]
     sessionId?: string

-    constructor(fields: { sessionId?: string }) {
+    constructor(fields?: { sessionId?: string }) {
         this.label = 'OpenAI Function Agent'
         this.name = 'openAIFunctionAgent'
         this.version = 3.0
@@ -33,7 +30,7 @@ class OpenAIFunctionAgent_Agents implements INode {
         this.category = 'Agents'
         this.icon = 'function.svg'
         this.description = `An agent that uses Function Calling to pick the tool and args to call`
-        this.baseClasses = [this.type, ...getBaseClasses(LCAgentExecutor)]
+        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
         this.inputs = [
             {
                 label: 'Allowed Tools',
@@ -63,19 +60,13 @@ class OpenAIFunctionAgent_Agents implements INode {
         this.sessionId = fields?.sessionId
     }

-    async init(nodeData: INodeData): Promise<any> {
-        const memory = nodeData.inputs?.memory as FlowiseMemory
-        const executor = prepareAgent(nodeData, this.sessionId)
-        if (memory) executor.memory = memory
-        return executor
+    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
+        return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
     }

     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
-
-        const executor = prepareAgent(nodeData, this.sessionId)
+        const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)

         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const callbacks = await additionalCallbacks(nodeData, options)
@@ -107,17 +98,11 @@ class OpenAIFunctionAgent_Agents implements INode {
     }
 }

-const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] =>
-    steps.flatMap(({ action, observation }) => {
-        if ('messageLog' in action && action.messageLog !== undefined) {
-            const log = action.messageLog as BaseMessage[]
-            return log.concat(new FunctionMessage(observation, action.tool))
-        } else {
-            return [new AIMessage(action.log)]
-        }
-    })
-
-const prepareAgent = (nodeData: INodeData, sessionId?: string) => {
+const prepareAgent = (
+    nodeData: INodeData,
+    flowObj: { sessionId?: string; chatId?: string; input?: string },
+    chatHistory: IMessage[] = []
+) => {
     const model = nodeData.inputs?.model as ChatOpenAI
     const memory = nodeData.inputs?.memory as FlowiseMemory
     const systemMessage = nodeData.inputs?.systemMessage as string
@@ -142,7 +127,7 @@ const prepareAgent = (nodeData: INodeData, sessionId?: string) => {
             [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
             agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
             [memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
-                const messages = (await memory.getChatMessages(sessionId, true)) as BaseMessage[]
+                const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
                 return messages ?? []
             }
         },
@@ -154,231 +139,13 @@ const prepareAgent = (nodeData: INodeData, sessionId?: string) => {
     const executor = AgentExecutor.fromAgentAndTools({
         agent: runnableAgent,
         tools,
-        sessionId
+        sessionId: flowObj?.sessionId,
+        chatId: flowObj?.chatId,
+        input: flowObj?.input,
+        verbose: process.env.DEBUG === 'true' ? true : false
     })
     return executor
 }

-type AgentExecutorOutput = ChainValues
-
-class AgentExecutor extends LCAgentExecutor {
-    sessionId?: string
-
-    static fromAgentAndTools(fields: AgentExecutorInput & { sessionId?: string }): AgentExecutor {
-        const newInstance = new AgentExecutor(fields)
-        if (fields.sessionId) newInstance.sessionId = fields.sessionId
-        return newInstance
-    }
-
-    shouldContinueIteration(iterations: number): boolean {
-        return this.maxIterations === undefined || iterations < this.maxIterations
-    }
-
-    async _call(inputs: ChainValues, runManager?: CallbackManagerForChainRun): Promise<AgentExecutorOutput> {
-        const toolsByName = Object.fromEntries(this.tools.map((t) => [t.name.toLowerCase(), t]))
-        const steps: AgentStep[] = []
-        let iterations = 0
-
-        const getOutput = async (finishStep: AgentFinish): Promise<AgentExecutorOutput> => {
-            const { returnValues } = finishStep
-            const additional = await this.agent.prepareForOutput(returnValues, steps)
-
-            if (this.returnIntermediateSteps) {
-                return { ...returnValues, intermediateSteps: steps, ...additional }
-            }
-            await runManager?.handleAgentEnd(finishStep)
-            return { ...returnValues, ...additional }
-        }
-
-        while (this.shouldContinueIteration(iterations)) {
-            let output
-            try {
-                output = await this.agent.plan(steps, inputs, runManager?.getChild())
-            } catch (e) {
-                if (e instanceof OutputParserException) {
-                    let observation
-                    let text = e.message
-                    if (this.handleParsingErrors === true) {
-                        if (e.sendToLLM) {
-                            observation = e.observation
-                            text = e.llmOutput ?? ''
-                        } else {
-                            observation = 'Invalid or incomplete response'
-                        }
-                    } else if (typeof this.handleParsingErrors === 'string') {
-                        observation = this.handleParsingErrors
-                    } else if (typeof this.handleParsingErrors === 'function') {
-                        observation = this.handleParsingErrors(e)
-                    } else {
-                        throw e
-                    }
-                    output = {
-                        tool: '_Exception',
-                        toolInput: observation,
-                        log: text
-                    } as AgentAction
-                } else {
-                    throw e
-                }
-            }
-
-            // Check if the agent has finished
-            if ('returnValues' in output) {
-                return getOutput(output)
-            }
-
-            let actions: AgentAction[]
-            if (Array.isArray(output)) {
-                actions = output as AgentAction[]
-            } else {
-                actions = [output as AgentAction]
-            }
-
-            const newSteps = await Promise.all(
-                actions.map(async (action) => {
-                    await runManager?.handleAgentAction(action)
-                    const tool = action.tool === '_Exception' ? new ExceptionTool() : toolsByName[action.tool?.toLowerCase()]
-                    let observation
-                    try {
-                        // here we need to override Tool call method to include sessionId as parameter
-                        observation = tool
-                            ? // @ts-ignore
-                              await tool.call(action.toolInput, runManager?.getChild(), undefined, this.sessionId)
-                            : `${action.tool} is not a valid tool, try another one.`
-                    } catch (e) {
-                        if (e instanceof ToolInputParsingException) {
-                            if (this.handleParsingErrors === true) {
-                                observation = 'Invalid or incomplete tool input. Please try again.'
-                            } else if (typeof this.handleParsingErrors === 'string') {
-                                observation = this.handleParsingErrors
-                            } else if (typeof this.handleParsingErrors === 'function') {
-                                observation = this.handleParsingErrors(e)
-                            } else {
-                                throw e
-                            }
-                            observation = await new ExceptionTool().call(observation, runManager?.getChild())
-                            return { action, observation: observation ?? '' }
-                        }
-                    }
-                    return { action, observation: observation ?? '' }
-                })
-            )
-
-            steps.push(...newSteps)
-
-            const lastStep = steps[steps.length - 1]
-            const lastTool = toolsByName[lastStep.action.tool?.toLowerCase()]
-
-            if (lastTool?.returnDirect) {
-                return getOutput({
-                    returnValues: { [this.agent.returnValues[0]]: lastStep.observation },
-                    log: ''
-                })
-            }
-
-            iterations += 1
-        }
-
-        const finish = await this.agent.returnStoppedResponse(this.earlyStoppingMethod, steps, inputs)
-        return getOutput(finish)
-    }
-
-    async _takeNextStep(
-        nameToolMap: Record<string, Tool>,
-        inputs: ChainValues,
-        intermediateSteps: AgentStep[],
-        runManager?: CallbackManagerForChainRun
-    ): Promise<AgentFinish | AgentStep[]> {
-        let output
-        try {
-            output = await this.agent.plan(intermediateSteps, inputs, runManager?.getChild())
-        } catch (e) {
-            if (e instanceof OutputParserException) {
-                let observation
-                let text = e.message
-                if (this.handleParsingErrors === true) {
-                    if (e.sendToLLM) {
-                        observation = e.observation
-                        text = e.llmOutput ?? ''
-                    } else {
-                        observation = 'Invalid or incomplete response'
-                    }
-                } else if (typeof this.handleParsingErrors === 'string') {
-                    observation = this.handleParsingErrors
-                } else if (typeof this.handleParsingErrors === 'function') {
-                    observation = this.handleParsingErrors(e)
-                } else {
-                    throw e
-                }
-                output = {
-                    tool: '_Exception',
-                    toolInput: observation,
-                    log: text
-                } as AgentAction
-            } else {
-                throw e
-            }
-        }
-
-        if ('returnValues' in output) {
-            return output
-        }
-
-        let actions: AgentAction[]
-        if (Array.isArray(output)) {
-            actions = output as AgentAction[]
-        } else {
-            actions = [output as AgentAction]
-        }
-
-        const result: AgentStep[] = []
-        for (const agentAction of actions) {
-            let observation = ''
-            if (runManager) {
-                await runManager?.handleAgentAction(agentAction)
-            }
-            if (agentAction.tool in nameToolMap) {
-                const tool = nameToolMap[agentAction.tool]
-                try {
-                    // here we need to override Tool call method to include sessionId as parameter
-                    // @ts-ignore
-                    observation = await tool.call(agentAction.toolInput, runManager?.getChild(), undefined, this.sessionId)
-                } catch (e) {
-                    if (e instanceof ToolInputParsingException) {
-                        if (this.handleParsingErrors === true) {
-                            observation = 'Invalid or incomplete tool input. Please try again.'
-                        } else if (typeof this.handleParsingErrors === 'string') {
-                            observation = this.handleParsingErrors
-                        } else if (typeof this.handleParsingErrors === 'function') {
-                            observation = this.handleParsingErrors(e)
-                        } else {
-                            throw e
-                        }
-                        observation = await new ExceptionTool().call(observation, runManager?.getChild())
-                    }
-                }
-            } else {
-                observation = `${agentAction.tool} is not a valid tool, try another available tool: ${Object.keys(nameToolMap).join(', ')}`
-            }
-            result.push({
-                action: agentAction,
-                observation
-            })
-        }
-        return result
-    }
-}
-
-class ExceptionTool extends Tool {
-    name = '_Exception'
-    description = 'Exception tool'
-
-    async _call(query: string) {
-        return query
-    }
-}

 module.exports = { nodeClass: OpenAIFunctionAgent_Agents }
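
`formatAgentSteps`, deleted here and now imported from the shared `src/agents` module along with the custom `AgentExecutor`, rebuilds the scratchpad messages from intermediate steps. Its implementation, as it appeared in the removed code, for reference:

```typescript
import { AgentStep, BaseMessage, FunctionMessage, AIMessage } from 'langchain/schema'

const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] =>
    steps.flatMap(({ action, observation }) => {
        if ('messageLog' in action && action.messageLog !== undefined) {
            // Function-calling agents keep the original model messages;
            // replay them followed by the tool's observation
            const log = action.messageLog as BaseMessage[]
            return log.concat(new FunctionMessage(observation, action.tool))
        } else {
            return [new AIMessage(action.log)]
        }
    })
```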

View File

@@ -1,14 +1,16 @@
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { ConversationChain } from 'langchain/chains'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
+import { getBaseClasses } from '../../../src/utils'
 import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
-import { BufferMemory } from 'langchain/memory'
 import { BaseChatModel } from 'langchain/chat_models/base'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { flatten } from 'lodash'
 import { Document } from 'langchain/document'
+import { RunnableSequence } from 'langchain/schema/runnable'
+import { StringOutputParser } from 'langchain/schema/output_parser'

 let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
+const inputKey = 'input'

 class ConversationChain_Chains implements INode {
     label: string
@@ -20,8 +22,9 @@ class ConversationChain_Chains implements INode {
     baseClasses: string[]
     description: string
     inputs: INodeParams[]
+    sessionId?: string

-    constructor() {
+    constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversation Chain'
         this.name = 'conversationChain'
         this.version = 1.0
@@ -32,7 +35,7 @@ class ConversationChain_Chains implements INode {
         this.baseClasses = [this.type, ...getBaseClasses(ConversationChain)]
         this.inputs = [
             {
-                label: 'Language Model',
+                label: 'Chat Model',
                 name: 'model',
                 type: 'BaseChatModel'
             },
@@ -60,76 +63,99 @@ class ConversationChain_Chains implements INode {
                 placeholder: 'You are a helpful assistant that write codes'
             }
         ]
+        this.sessionId = fields?.sessionId
     }

-    async init(nodeData: INodeData): Promise<any> {
-        const model = nodeData.inputs?.model as BaseChatModel
-        const memory = nodeData.inputs?.memory as BufferMemory
-        const prompt = nodeData.inputs?.systemMessagePrompt as string
-        const docs = nodeData.inputs?.document as Document[]
-
-        const flattenDocs = docs && docs.length ? flatten(docs) : []
-        const finalDocs = []
-        for (let i = 0; i < flattenDocs.length; i += 1) {
-            if (flattenDocs[i] && flattenDocs[i].pageContent) {
-                finalDocs.push(new Document(flattenDocs[i]))
-            }
-        }
-
-        let finalText = ''
-        for (let i = 0; i < finalDocs.length; i += 1) {
-            finalText += finalDocs[i].pageContent
-        }
-
-        const replaceChar: string[] = ['{', '}']
-        for (const char of replaceChar) finalText = finalText.replaceAll(char, '')
-
-        if (finalText) systemMessage = `${systemMessage}\nThe AI has the following context:\n${finalText}`
-
-        const obj: any = {
-            llm: model,
-            memory,
-            verbose: process.env.DEBUG === 'true' ? true : false
-        }
-
-        const chatPrompt = ChatPromptTemplate.fromMessages([
-            SystemMessagePromptTemplate.fromTemplate(prompt ? `${prompt}\n${systemMessage}` : systemMessage),
-            new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
-            HumanMessagePromptTemplate.fromTemplate('{input}')
-        ])
-        obj.prompt = chatPrompt
-
-        const chain = new ConversationChain(obj)
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const chain = prepareChain(nodeData, this.sessionId, options.chatHistory)
         return chain
     }

     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
-        const chain = nodeData.instance as ConversationChain
-        const memory = nodeData.inputs?.memory as BufferMemory
-        memory.returnMessages = true // Return true for BaseChatModel
-
-        if (options && options.chatHistory) {
-            const chatHistoryClassName = memory.chatHistory.constructor.name
-            // Only replace when its In-Memory
-            if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
-                memory.chatHistory = mapChatHistory(options)
-            }
-        }
-
-        chain.memory = memory
-
+        const memory = nodeData.inputs?.memory
+        const chain = prepareChain(nodeData, this.sessionId, options.chatHistory)
+
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const callbacks = await additionalCallbacks(nodeData, options)
+        let res = ''

         if (options.socketIO && options.socketIOClientId) {
             const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
-            const res = await chain.call({ input }, [loggerHandler, handler, ...callbacks])
-            return res?.response
+            res = await chain.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
         } else {
-            const res = await chain.call({ input }, [loggerHandler, ...callbacks])
-            return res?.response
+            res = await chain.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
         }
+
+        await memory.addChatMessages(
+            [
+                {
+                    text: input,
+                    type: 'userMessage'
+                },
+                {
+                    text: res,
+                    type: 'apiMessage'
+                }
+            ],
+            this.sessionId
+        )
+
+        return res
     }
 }

+const prepareChatPrompt = (nodeData: INodeData) => {
+    const memory = nodeData.inputs?.memory as FlowiseMemory
+    const prompt = nodeData.inputs?.systemMessagePrompt as string
+    const docs = nodeData.inputs?.document as Document[]
+
+    const flattenDocs = docs && docs.length ? flatten(docs) : []
+    const finalDocs = []
+    for (let i = 0; i < flattenDocs.length; i += 1) {
+        if (flattenDocs[i] && flattenDocs[i].pageContent) {
+            finalDocs.push(new Document(flattenDocs[i]))
+        }
+    }
+
+    let finalText = ''
+    for (let i = 0; i < finalDocs.length; i += 1) {
+        finalText += finalDocs[i].pageContent
+    }
+
+    const replaceChar: string[] = ['{', '}']
+    for (const char of replaceChar) finalText = finalText.replaceAll(char, '')
+
+    if (finalText) systemMessage = `${systemMessage}\nThe AI has the following context:\n${finalText}`
+
+    const chatPrompt = ChatPromptTemplate.fromMessages([
+        SystemMessagePromptTemplate.fromTemplate(prompt ? `${prompt}\n${systemMessage}` : systemMessage),
+        new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
+        HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)
+    ])
+
+    return chatPrompt
+}
+
+const prepareChain = (nodeData: INodeData, sessionId?: string, chatHistory: IMessage[] = []) => {
+    const model = nodeData.inputs?.model as BaseChatModel
+    const memory = nodeData.inputs?.memory as FlowiseMemory
+    const memoryKey = memory.memoryKey ?? 'chat_history'
+
+    const conversationChain = RunnableSequence.from([
+        {
+            [inputKey]: (input: { input: string }) => input.input,
+            [memoryKey]: async () => {
+                const history = await memory.getChatMessages(sessionId, true, chatHistory)
+                return history
+            }
+        },
+        prepareChatPrompt(nodeData),
+        model,
+        new StringOutputParser()
+    ])
+
+    return conversationChain
+}

 module.exports = { nodeClass: ConversationChain_Chains }
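
With `StringOutputParser` as the final step, the chain built by `prepareChain` yields a plain string, and `invoke()` supersedes the old `chain.call()`, with callbacks carried in the second argument's options object. A minimal usage sketch under those assumptions:

```typescript
import { Runnable } from 'langchain/schema/runnable'

// `chain` is the runnable returned by prepareChain above
const ask = async (chain: Runnable<{ input: string }, string>, question: string): Promise<string> => {
    // The result is already a string, not a { response } record
    return await chain.invoke({ input: question })
}
```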

View File

@@ -1,20 +1,25 @@
 import { BaseLanguageModel } from 'langchain/base_language'
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
-import { ConversationalRetrievalQAChain, QAChainParams } from 'langchain/chains'
+import { ConversationalRetrievalQAChain } from 'langchain/chains'
 import { BaseRetriever } from 'langchain/schema/retriever'
-import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
+import { BufferMemoryInput } from 'langchain/memory'
 import { PromptTemplate } from 'langchain/prompts'
-import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
-import {
-    default_map_reduce_template,
-    default_qa_template,
-    qa_template,
-    map_reduce_template,
-    CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT,
-    refine_question_template,
-    refine_template
-} from './prompts'
+import { QA_TEMPLATE, REPHRASE_TEMPLATE, RESPONSE_TEMPLATE } from './prompts'
+import { Runnable, RunnableSequence, RunnableMap, RunnableBranch, RunnableLambda } from 'langchain/schema/runnable'
+import { BaseMessage, HumanMessage, AIMessage } from 'langchain/schema'
+import { StringOutputParser } from 'langchain/schema/output_parser'
+import type { Document } from 'langchain/document'
+import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
+import { applyPatch } from 'fast-json-patch'
+import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
+import { ConsoleCallbackHandler, additionalCallbacks } from '../../../src/handler'
+import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface'
+
+type RetrievalChainInput = {
+    chat_history: string
+    question: string
+}
+
+const sourceRunnableName = 'FindDocs'

 class ConversationalRetrievalQAChain_Chains implements INode {
     label: string
@@ -26,11 +31,12 @@ class ConversationalRetrievalQAChain_Chains implements INode {
     baseClasses: string[]
     description: string
     inputs: INodeParams[]
+    sessionId?: string

-    constructor() {
+    constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversational Retrieval QA Chain'
         this.name = 'conversationalRetrievalQAChain'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'ConversationalRetrievalQAChain'
         this.icon = 'qa.svg'
         this.category = 'Chains'
@@ -38,9 +44,9 @@ class ConversationalRetrievalQAChain_Chains implements INode {
         this.baseClasses = [this.type, ...getBaseClasses(ConversationalRetrievalQAChain)]
         this.inputs = [
             {
-                label: 'Language Model',
+                label: 'Chat Model',
                 name: 'model',
-                type: 'BaseLanguageModel'
+                type: 'BaseChatModel'
             },
             {
                 label: 'Vector Store Retriever',
@@ -60,6 +66,29 @@ class ConversationalRetrievalQAChain_Chains implements INode {
                 type: 'boolean',
                 optional: true
             },
+            {
+                label: 'Rephrase Prompt',
+                name: 'rephrasePrompt',
+                type: 'string',
+                description: 'Using previous chat history, rephrase question into a standalone question',
+                warning: 'Prompt must include input variables: {chat_history} and {question}',
+                rows: 4,
+                additionalParams: true,
+                optional: true,
+                default: REPHRASE_TEMPLATE
+            },
+            {
+                label: 'Response Prompt',
+                name: 'responsePrompt',
+                type: 'string',
+                description: 'Taking the rephrased question, search for answer from the provided context',
+                warning: 'Prompt must include input variable: {context}',
+                rows: 4,
+                additionalParams: true,
+                optional: true,
+                default: RESPONSE_TEMPLATE
+            }
+            /** Deprecated
             {
                 label: 'System Message',
                 name: 'systemMessagePrompt',
@@ -70,6 +99,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
                 placeholder:
                     'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.'
             },
+            // TODO: create standalone chains for these 3 modes as they are not compatible with memory
             {
                 label: 'Chain Option',
                 name: 'chainOption',
@@ -95,124 +125,246 @@ class ConversationalRetrievalQAChain_Chains implements INode {
                 additionalParams: true,
                 optional: true
             }
+            */
         ]
+        this.sessionId = fields?.sessionId
     }

     async init(nodeData: INodeData): Promise<any> {
         const model = nodeData.inputs?.model as BaseLanguageModel
         const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever
         const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
-        const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
-        const chainOption = nodeData.inputs?.chainOption as string
-        const externalMemory = nodeData.inputs?.memory
+        const rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
+        const responsePrompt = nodeData.inputs?.responsePrompt as string

-        const obj: any = {
-            verbose: process.env.DEBUG === 'true' ? true : false,
-            questionGeneratorChainOptions: {
-                template: CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT
-            }
-        }
-        if (returnSourceDocuments) obj.returnSourceDocuments = returnSourceDocuments
-
-        if (chainOption === 'map_reduce') {
-            obj.qaChainOptions = {
-                type: 'map_reduce',
-                combinePrompt: PromptTemplate.fromTemplate(
-                    systemMessagePrompt ? `${systemMessagePrompt}\n${map_reduce_template}` : default_map_reduce_template
-                )
-            } as QAChainParams
-        } else if (chainOption === 'refine') {
-            const qprompt = new PromptTemplate({
-                inputVariables: ['context', 'question'],
-                template: refine_question_template(systemMessagePrompt)
-            })
-            const rprompt = new PromptTemplate({
-                inputVariables: ['context', 'question', 'existing_answer'],
-                template: refine_template
-            })
-            obj.qaChainOptions = {
-                type: 'refine',
-                questionPrompt: qprompt,
-                refinePrompt: rprompt
-            } as QAChainParams
-        } else {
-            obj.qaChainOptions = {
-                type: 'stuff',
-                prompt: PromptTemplate.fromTemplate(systemMessagePrompt ? `${systemMessagePrompt}\n${qa_template}` : default_qa_template)
-            } as QAChainParams
-        }
-
-        if (externalMemory) {
-            externalMemory.memoryKey = 'chat_history'
-            externalMemory.inputKey = 'question'
-            externalMemory.outputKey = 'text'
-            externalMemory.returnMessages = true
-            if (chainOption === 'refine') externalMemory.outputKey = 'output_text'
-            obj.memory = externalMemory
-        } else {
-            const fields: BufferMemoryInput = {
-                memoryKey: 'chat_history',
-                inputKey: 'question',
-                outputKey: 'text',
-                returnMessages: true
-            }
-            if (chainOption === 'refine') fields.outputKey = 'output_text'
-            obj.memory = new BufferMemory(fields)
-        }
-
-        const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStoreRetriever, obj)
-        return chain
+        let customResponsePrompt = responsePrompt
+        // If the deprecated systemMessagePrompt is still exists
+        if (systemMessagePrompt) {
+            customResponsePrompt = `${systemMessagePrompt}\n${QA_TEMPLATE}`
+        }
+
+        const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt)
+        return answerChain
     }

     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
-        const chain = nodeData.instance as ConversationalRetrievalQAChain
+        const model = nodeData.inputs?.model as BaseLanguageModel
+        const externalMemory = nodeData.inputs?.memory
+        const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever
+        const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
+        const rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
+        const responsePrompt = nodeData.inputs?.responsePrompt as string
         const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
-        const chainOption = nodeData.inputs?.chainOption as string
-        let model = nodeData.inputs?.model
-
-        // Temporary fix: https://github.com/hwchase17/langchainjs/issues/754
-        model.streaming = false
-        chain.questionGeneratorChain.llm = model
-
-        const obj = { question: input }
-
-        if (options && options.chatHistory && chain.memory) {
-            const chatHistoryClassName = (chain.memory as any).chatHistory.constructor.name
-            // Only replace when its In-Memory
-            if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
-                ;(chain.memory as any).chatHistory = mapChatHistory(options)
-            }
-        }
+
+        let customResponsePrompt = responsePrompt
+        // If the deprecated systemMessagePrompt is still exists
+        if (systemMessagePrompt) {
+            customResponsePrompt = `${systemMessagePrompt}\n${QA_TEMPLATE}`
+        }
+
+        let memory: FlowiseMemory | undefined = externalMemory
+        if (!memory) {
+            memory = new BufferMemory({
+                returnMessages: true,
+                memoryKey: 'chat_history',
+                inputKey: 'input'
+            })
+        }
+
+        const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt)
+
+        const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? []

         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const callbacks = await additionalCallbacks(nodeData, options)

-        if (options.socketIO && options.socketIOClientId) {
-            const handler = new CustomChainHandler(
-                options.socketIO,
-                options.socketIOClientId,
-                chainOption === 'refine' ? 4 : undefined,
-                returnSourceDocuments
-            )
-            const res = await chain.call(obj, [loggerHandler, handler, ...callbacks])
-            if (chainOption === 'refine') {
-                if (res.output_text && res.sourceDocuments) {
-                    return {
-                        text: res.output_text,
-                        sourceDocuments: res.sourceDocuments
-                    }
-                }
-                return res?.output_text
-            }
-            if (res.text && res.sourceDocuments) return res
-            return res?.text
-        } else {
-            const res = await chain.call(obj, [loggerHandler, ...callbacks])
-            if (res.text && res.sourceDocuments) return res
-            return res?.text
-        }
+        const stream = answerChain.streamLog(
+            { question: input, chat_history: history },
+            { callbacks: [loggerHandler, ...callbacks] },
+            {
+                includeNames: [sourceRunnableName]
+            }
+        )
+
+        let streamedResponse: Record<string, any> = {}
+        let sourceDocuments: ICommonObject[] = []
+        let text = ''
+        let isStreamingStarted = false
+        const isStreamingEnabled = options.socketIO && options.socketIOClientId
+
+        for await (const chunk of stream) {
+            streamedResponse = applyPatch(streamedResponse, chunk.ops).newDocument
+
+            if (streamedResponse.final_output) {
+                text = streamedResponse.final_output?.output
+                if (isStreamingEnabled) options.socketIO.to(options.socketIOClientId).emit('end')
+                if (Array.isArray(streamedResponse?.logs?.[sourceRunnableName]?.final_output?.output)) {
+                    sourceDocuments = streamedResponse?.logs?.[sourceRunnableName]?.final_output?.output
+                    if (isStreamingEnabled && returnSourceDocuments)
+                        options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments)
+                }
+            }
+
+            if (
+                Array.isArray(streamedResponse?.streamed_output) &&
+                streamedResponse?.streamed_output.length &&
+                !streamedResponse.final_output
+            ) {
+                const token = streamedResponse.streamed_output[streamedResponse.streamed_output.length - 1]
+                if (!isStreamingStarted) {
+                    isStreamingStarted = true
+                    if (isStreamingEnabled) options.socketIO.to(options.socketIOClientId).emit('start', token)
+                }
+                if (isStreamingEnabled) options.socketIO.to(options.socketIOClientId).emit('token', token)
+            }
+        }
+
+        await memory.addChatMessages(
+            [
+                {
+                    text: input,
+                    type: 'userMessage'
+                },
+                {
+                    text: text,
+                    type: 'apiMessage'
+                }
+            ],
+            this.sessionId
+        )
+
+        if (returnSourceDocuments) return { text, sourceDocuments }
+        else return { text }
     }
 }

+const createRetrieverChain = (llm: BaseLanguageModel, retriever: Runnable, rephrasePrompt: string) => {
+    // Small speed/accuracy optimization: no need to rephrase the first question
+    // since there shouldn't be any meta-references to prior chat history
+    const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(rephrasePrompt)
+    const condenseQuestionChain = RunnableSequence.from([CONDENSE_QUESTION_PROMPT, llm, new StringOutputParser()]).withConfig({
+        runName: 'CondenseQuestion'
+    })
+
+    const hasHistoryCheckFn = RunnableLambda.from((input: RetrievalChainInput) => input.chat_history.length > 0).withConfig({
+        runName: 'HasChatHistoryCheck'
+    })
+
+    const conversationChain = condenseQuestionChain.pipe(retriever).withConfig({
+        runName: 'RetrievalChainWithHistory'
+    })
+
+    const basicRetrievalChain = RunnableLambda.from((input: RetrievalChainInput) => input.question)
+        .withConfig({
+            runName: 'Itemgetter:question'
+        })
+        .pipe(retriever)
+        .withConfig({ runName: 'RetrievalChainWithNoHistory' })
+
+    return RunnableBranch.from([[hasHistoryCheckFn, conversationChain], basicRetrievalChain]).withConfig({ runName: sourceRunnableName })
+}
+
+const formatDocs = (docs: Document[]) => {
+    return docs.map((doc, i) => `<doc id='${i}'>${doc.pageContent}</doc>`).join('\n')
+}
+
+const formatChatHistoryAsString = (history: BaseMessage[]) => {
+    return history.map((message) => `${message._getType()}: ${message.content}`).join('\n')
+}
+
+const serializeHistory = (input: any) => {
+    const chatHistory: IMessage[] = input.chat_history || []
+    const convertedChatHistory = []
+    for (const message of chatHistory) {
+        if (message.type === 'userMessage') {
+            convertedChatHistory.push(new HumanMessage({ content: message.message }))
+        }
+        if (message.type === 'apiMessage') {
+            convertedChatHistory.push(new AIMessage({ content: message.message }))
+        }
+    }
+    return convertedChatHistory
+}
+
+const createChain = (
+    llm: BaseLanguageModel,
+    retriever: Runnable,
+    rephrasePrompt = REPHRASE_TEMPLATE,
+    responsePrompt = RESPONSE_TEMPLATE
+) => {
+    const retrieverChain = createRetrieverChain(llm, retriever, rephrasePrompt)
+
+    const context = RunnableMap.from({
+        context: RunnableSequence.from([
+            ({ question, chat_history }) => ({
+                question,
+                chat_history: formatChatHistoryAsString(chat_history)
+            }),
+            retrieverChain,
+            RunnableLambda.from(formatDocs).withConfig({
+                runName: 'FormatDocumentChunks'
+            })
+        ]),
+        question: RunnableLambda.from((input: RetrievalChainInput) => input.question).withConfig({
+            runName: 'Itemgetter:question'
+        }),
+        chat_history: RunnableLambda.from((input: RetrievalChainInput) => input.chat_history).withConfig({
+            runName: 'Itemgetter:chat_history'
+        })
+    }).withConfig({ tags: ['RetrieveDocs'] })
+
+    const prompt = ChatPromptTemplate.fromMessages([
+        ['system', responsePrompt],
+        new MessagesPlaceholder('chat_history'),
+        ['human', `{question}`]
+    ])
+
+    const responseSynthesizerChain = RunnableSequence.from([prompt, llm, new StringOutputParser()]).withConfig({
+        tags: ['GenerateResponse']
+    })
+
+    const conversationalQAChain = RunnableSequence.from([
+        {
+            question: RunnableLambda.from((input: RetrievalChainInput) => input.question).withConfig({
+                runName: 'Itemgetter:question'
+            }),
+            chat_history: RunnableLambda.from(serializeHistory).withConfig({
+                runName: 'SerializeHistory'
+            })
+        },
+        context,
+        responseSynthesizerChain
+    ])
+
+    return conversationalQAChain
+}
+
+class BufferMemory extends FlowiseMemory implements MemoryMethods {
+    constructor(fields: BufferMemoryInput) {
+        super(fields)
+    }
+
+    async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise<IMessage[] | BaseMessage[]> {
+        await this.chatHistory.clear()
+
+        for (const msg of prevHistory) {
+            if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
+            else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
+        }
+
+        const memoryResult = await this.loadMemoryVariables({})
+        const baseMessages = memoryResult[this.memoryKey ?? 'chat_history']
+        return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
+    }
+
+    async addChatMessages(): Promise<void> {
+        // adding chat messages will be done on the fly in getChatMessages()
+        return
+    }
+
+    async clearChatMessages(): Promise<void> {
+        await this.clear()
+    }
+}
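
The streaming loop above is driven by `streamLog()`, which emits JSON-patch chunks; `fast-json-patch` folds each chunk's `ops` into a growing run-log state whose `streamed_output` array accumulates tokens. A stripped-down sketch of that consumption pattern (assumes any runnable with a string output; strict typings may need a cast on `ops`):

```typescript
import { applyPatch } from 'fast-json-patch'
import { Runnable } from 'langchain/schema/runnable'

const collectTokens = async (chain: Runnable, input: Record<string, unknown>): Promise<string[]> => {
    const tokens: string[] = []
    let state: Record<string, any> = {}
    for await (const chunk of chain.streamLog(input)) {
        // Each chunk carries JSON-patch operations against the run log
        state = applyPatch(state, chunk.ops).newDocument
        const streamed = state.streamed_output
        if (Array.isArray(streamed) && streamed.length) {
            // The newest token is the last element of streamed_output
            tokens.push(String(streamed[streamed.length - 1]))
        }
    }
    return tokens
}
```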

View File

@@ -1,64 +1,27 @@
-export const default_qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
-{context}
-Question: {question}
-Helpful Answer:`
-export const qa_template = `Use the following pieces of context to answer the question at the end.
-{context}
-Question: {question}
-Helpful Answer:`
-export const default_map_reduce_template = `Given the following extracted parts of a long document and a question, create a final answer.
-If you don't know the answer, just say that you don't know. Don't try to make up an answer.
-{summaries}
-Question: {question}
-Helpful Answer:`
-export const map_reduce_template = `Given the following extracted parts of a long document and a question, create a final answer.
-{summaries}
-Question: {question}
-Helpful Answer:`
-export const refine_question_template = (sysPrompt?: string) => {
-let returnPrompt = ''
-if (sysPrompt)
-returnPrompt = `Context information is below.
----------------------
-{context}
----------------------
-Given the context information and not prior knowledge, ${sysPrompt}
-Answer the question: {question}.
-Answer:`
-if (!sysPrompt)
-returnPrompt = `Context information is below.
----------------------
-{context}
----------------------
-Given the context information and not prior knowledge, answer the question: {question}.
-Answer:`
-return returnPrompt
-}
-export const refine_template = `The original question is as follows: {question}
-We have provided an existing answer: {existing_answer}
-We have the opportunity to refine the existing answer (only if needed) with some more context below.
-------------
-{context}
-------------
-Given the new context, refine the original answer to better answer the question.
-If you can't find answer from the context, return the original answer.`
export const CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, answer in the same language as the follow up question. include it in the standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:`
+export const RESPONSE_TEMPLATE = `I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". Using the provided context, answer the user's question to the best of your ability using the resources provided.
+If there is nothing in the context relevant to the question at hand, just say "Hmm, I'm not sure" and stop after that. Refuse to answer any question not about the info. Never break character.
+------------
+{context}
+------------
+REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm not sure". Don't try to make up an answer. Never break character.`
+export const QA_TEMPLATE = `Use the following pieces of context to answer the question at the end.
+{context}
+Question: {question}
+Helpful Answer:`
+export const REPHRASE_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
+Chat History:
+{chat_history}
+Follow Up Input: {question}
+Standalone Question:`
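// Illustrative only (not from the diff): REPHRASE_TEMPLATE is consumed by createRetrieverChain,
// roughly like
// const condenseQuestionPrompt = ChatPromptTemplate.fromTemplate(REPHRASE_TEMPLATE)
// await condenseQuestionPrompt.format({ chat_history: 'human: hi\nai: hello', question: 'and streaming?' })
// which yields the standalone-question prompt that is sent to the LLM before retrieval.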

View File

@@ -1,4 +1,4 @@
-import { FlowiseMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface'
+import { FlowiseMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { BaseMessage } from 'langchain/schema'
@@ -55,36 +55,27 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
super(fields)
}
-async getChatMessages(_?: string, returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
+async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise<IMessage[] | BaseMessage[]> {
+await this.chatHistory.clear()
+for (const msg of prevHistory) {
+if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
+else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
+}
const memoryResult = await this.loadMemoryVariables({})
const baseMessages = memoryResult[this.memoryKey ?? 'chat_history']
return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
}
-async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise<void> {
-const input = msgArray.find((msg) => msg.type === 'userMessage')
-const output = msgArray.find((msg) => msg.type === 'apiMessage')
-const inputValues = { [this.inputKey ?? 'input']: input?.text }
-const outputValues = { output: output?.text }
-await this.saveContext(inputValues, outputValues)
+async addChatMessages(): Promise<void> {
+// adding chat messages will be done on the fly in getChatMessages()
+return
}
async clearChatMessages(): Promise<void> {
await this.clear()
}
-async resumeMessages(messages: IMessage[]): Promise<void> {
-// Clear existing chatHistory to avoid duplication
-if (messages.length) await this.clear()
-// Insert into chatHistory
-for (const msg of messages) {
-if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
-else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
-}
-}
}
module.exports = { nodeClass: BufferMemory_Memory }

View File

@@ -1,4 +1,4 @@
-import { FlowiseWindowMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface'
+import { FlowiseWindowMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
import { BufferWindowMemory, BufferWindowMemoryInput } from 'langchain/memory'
import { BaseMessage } from 'langchain/schema'
@@ -67,36 +67,28 @@ class BufferWindowMemoryExtended extends FlowiseWindowMemory implements MemoryMethods {
super(fields)
}
-async getChatMessages(_?: string, returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
+async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise<IMessage[] | BaseMessage[]> {
+await this.chatHistory.clear()
+// Insert into chatHistory
+for (const msg of prevHistory) {
+if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
+else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
+}
const memoryResult = await this.loadMemoryVariables({})
const baseMessages = memoryResult[this.memoryKey ?? 'chat_history']
return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
}
-async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise<void> {
-const input = msgArray.find((msg) => msg.type === 'userMessage')
-const output = msgArray.find((msg) => msg.type === 'apiMessage')
-const inputValues = { [this.inputKey ?? 'input']: input?.text }
-const outputValues = { output: output?.text }
-await this.saveContext(inputValues, outputValues)
+async addChatMessages(): Promise<void> {
+// adding chat messages will be done on the fly in getChatMessages()
+return
}
async clearChatMessages(): Promise<void> {
await this.clear()
}
-async resumeMessages(messages: IMessage[]): Promise<void> {
-// Clear existing chatHistory to avoid duplication
-if (messages.length) await this.clear()
-// Insert into chatHistory
-for (const msg of messages) {
-if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
-else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
-}
-}
}
module.exports = { nodeClass: BufferWindowMemory_Memory }

View File

@@ -1,4 +1,4 @@
-import { FlowiseSummaryMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface'
+import { FlowiseSummaryMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
import { ConversationSummaryMemory, ConversationSummaryMemoryInput } from 'langchain/memory'
import { BaseLanguageModel } from 'langchain/base_language'
@@ -66,40 +66,32 @@ class ConversationSummaryMemoryExtended extends FlowiseSummaryMemory implements MemoryMethods {
super(fields)
}
-async getChatMessages(_?: string, returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
+async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise<IMessage[] | BaseMessage[]> {
+await this.chatHistory.clear()
+this.buffer = ''
+for (const msg of prevHistory) {
+if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
+else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
+}
+// Get summary
+const chatMessages = await this.chatHistory.getMessages()
+this.buffer = chatMessages.length ? await this.predictNewSummary(chatMessages.slice(-2), this.buffer) : ''
const memoryResult = await this.loadMemoryVariables({})
const baseMessages = memoryResult[this.memoryKey ?? 'chat_history']
return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
}
-async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise<void> {
-const input = msgArray.find((msg) => msg.type === 'userMessage')
-const output = msgArray.find((msg) => msg.type === 'apiMessage')
-const inputValues = { [this.inputKey ?? 'input']: input?.text }
-const outputValues = { output: output?.text }
-await this.saveContext(inputValues, outputValues)
+async addChatMessages(): Promise<void> {
+// adding chat messages will be done on the fly in getChatMessages()
+return
}
async clearChatMessages(): Promise<void> {
await this.clear()
}
-async resumeMessages(messages: IMessage[]): Promise<void> {
-// Clear existing chatHistory to avoid duplication
-if (messages.length) await this.clear()
-// Insert into chatHistory
-for (const msg of messages) {
-if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
-else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
-}
-// Replace buffer
-const chatMessages = await this.chatHistory.getMessages()
-this.buffer = await this.predictNewSummary(chatMessages.slice(-2), this.buffer)
-}
}
module.exports = { nodeClass: ConversationSummaryMemory_Memory }
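// Note on the summary variant (commentary, not part of the diff): the summary buffer is now
// recomputed inside getChatMessages() from the last user/AI pair, roughly:
// const chatMessages = await summaryMemory.chatHistory.getMessages()
// summaryMemory.buffer = await summaryMemory.predictNewSummary(chatMessages.slice(-2), summaryMemory.buffer)
// so the removed resumeMessages() is no longer needed to rebuild state after a restart.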

View File

@@ -12,13 +12,7 @@ import {
import { DynamoDBChatMessageHistory } from 'langchain/stores/message/dynamodb'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, StoredMessage, BaseMessage } from 'langchain/schema'
-import {
-convertBaseMessagetoIMessage,
-getBaseClasses,
-getCredentialData,
-getCredentialParam,
-serializeChatHistory
-} from '../../../src/utils'
+import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface'
class DynamoDb_Memory implements INode {
@@ -70,7 +64,8 @@ class DynamoDb_Memory implements INode {
label: 'Session ID',
name: 'sessionId',
type: 'string',
-description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
+description:
+'If not specified, a random id will be used. Learn <a target="_blank" href="https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat">more</a>',
default: '',
additionalParams: true,
optional: true
@@ -88,25 +83,6 @@
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return initalizeDynamoDB(nodeData, options)
}
-//@ts-ignore
-memoryMethods = {
-async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
-const dynamodbMemory = await initalizeDynamoDB(nodeData, options)
-const sessionId = nodeData.inputs?.sessionId as string
-const chatId = options?.chatId as string
-options.logger.info(`Clearing DynamoDb memory session ${sessionId ? sessionId : chatId}`)
-await dynamodbMemory.clear()
-options.logger.info(`Successfully cleared DynamoDb memory session ${sessionId ? sessionId : chatId}`)
-},
-async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
-const memoryKey = nodeData.inputs?.memoryKey as string
-const dynamodbMemory = await initalizeDynamoDB(nodeData, options)
-const key = memoryKey ?? 'chat_history'
-const memoryResult = await dynamodbMemory.loadMemoryVariables({})
-return serializeChatHistory(memoryResult[key])
-}
-}
}
const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
@@ -114,17 +90,7 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const partitionKey = nodeData.inputs?.partitionKey as string
const region = nodeData.inputs?.region as string
const memoryKey = nodeData.inputs?.memoryKey as string
-const chatId = options.chatId
-let isSessionIdUsingChatMessageId = false
-let sessionId = ''
-if (!nodeData.inputs?.sessionId && chatId) {
-isSessionIdUsingChatMessageId = true
-sessionId = chatId
-} else {
-sessionId = nodeData.inputs?.sessionId
-}
+const sessionId = nodeData.inputs?.sessionId as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const accessKeyId = getCredentialParam('accessKey', credentialData, nodeData)
@@ -150,7 +116,6 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const memory = new BufferMemoryExtended({
memoryKey: memoryKey ?? 'chat_history',
chatHistory: dynamoDb,
-isSessionIdUsingChatMessageId,
sessionId,
dynamodbClient: client
})
@@ -158,7 +123,6 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
}
interface BufferMemoryExtendedInput {
-isSessionIdUsingChatMessageId: boolean
dynamodbClient: DynamoDBClient
sessionId: string
}
@@ -178,7 +142,6 @@ interface DynamoDBSerializedChatMessage {
}
class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
-isSessionIdUsingChatMessageId = false
sessionId = ''
dynamodbClient: DynamoDBClient
@@ -306,10 +269,6 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
await this.dynamodbClient.send(new DeleteItemCommand(params))
await this.clear()
}
-async resumeMessages(): Promise<void> {
-return
-}
}
module.exports = { nodeClass: DynamoDb_Memory }

View File

@@ -2,13 +2,7 @@ import { MongoClient, Collection, Document } from 'mongodb'
import { MongoDBChatMessageHistory } from 'langchain/stores/message/mongodb'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, BaseMessage } from 'langchain/schema'
-import {
-convertBaseMessagetoIMessage,
-getBaseClasses,
-getCredentialData,
-getCredentialParam,
-serializeChatHistory
-} from '../../../src/utils'
+import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface'
class MongoDB_Memory implements INode {
@@ -55,7 +49,8 @@ class MongoDB_Memory implements INode {
label: 'Session Id',
name: 'sessionId',
type: 'string',
-description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
+description:
+'If not specified, a random id will be used. Learn <a target="_blank" href="https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat">more</a>',
default: '',
additionalParams: true,
optional: true
@@ -73,42 +68,13 @@
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return initializeMongoDB(nodeData, options)
}
-//@ts-ignore
-memoryMethods = {
-async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
-const mongodbMemory = await initializeMongoDB(nodeData, options)
-const sessionId = nodeData.inputs?.sessionId as string
-const chatId = options?.chatId as string
-options.logger.info(`Clearing MongoDB memory session ${sessionId ? sessionId : chatId}`)
-await mongodbMemory.clear()
-options.logger.info(`Successfully cleared MongoDB memory session ${sessionId ? sessionId : chatId}`)
-},
-async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
-const memoryKey = nodeData.inputs?.memoryKey as string
-const mongodbMemory = await initializeMongoDB(nodeData, options)
-const key = memoryKey ?? 'chat_history'
-const memoryResult = await mongodbMemory.loadMemoryVariables({})
-return serializeChatHistory(memoryResult[key])
-}
-}
}
const initializeMongoDB = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const databaseName = nodeData.inputs?.databaseName as string
const collectionName = nodeData.inputs?.collectionName as string
const memoryKey = nodeData.inputs?.memoryKey as string
-const chatId = options?.chatId as string
-let isSessionIdUsingChatMessageId = false
-let sessionId = ''
-if (!nodeData.inputs?.sessionId && chatId) {
-isSessionIdUsingChatMessageId = true
-sessionId = chatId
-} else {
-sessionId = nodeData.inputs?.sessionId
-}
+const sessionId = nodeData.inputs?.sessionId as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const mongoDBConnectUrl = getCredentialParam('mongoDBConnectUrl', credentialData, nodeData)
@@ -149,14 +115,12 @@ const initializeMongoDB = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
return new BufferMemoryExtended({
memoryKey: memoryKey ?? 'chat_history',
chatHistory: mongoDBChatMessageHistory,
-isSessionIdUsingChatMessageId,
sessionId,
collection
})
}
interface BufferMemoryExtendedInput {
-isSessionIdUsingChatMessageId: boolean
collection: Collection<Document>
sessionId: string
}
@@ -164,7 +128,6 @@ interface BufferMemoryExtendedInput {
class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
sessionId = ''
collection: Collection<Document>
-isSessionIdUsingChatMessageId? = false
constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) {
super(fields)
@@ -221,10 +184,6 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
await this.collection.deleteOne({ sessionId: id })
await this.clear()
}
-async resumeMessages(): Promise<void> {
-return
-}
}
module.exports = { nodeClass: MongoDB_Memory }

View File

@@ -1,9 +1,14 @@
import { IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject } from '../../../src'
-import { MotorheadMemory, MotorheadMemoryInput, InputValues, MemoryVariables, OutputValues, getBufferString } from 'langchain/memory'
+import { MotorheadMemory, MotorheadMemoryInput, InputValues, OutputValues } from 'langchain/memory'
import fetch from 'node-fetch'
-import { BaseMessage } from 'langchain/schema'
+import { AIMessage, BaseMessage, ChatMessage, HumanMessage } from 'langchain/schema'
+type MotorheadMessage = {
+content: string
+role: 'Human' | 'AI'
+}
class MotorMemory_Memory implements INode {
label: string
@@ -46,7 +51,8 @@ class MotorMemory_Memory implements INode {
label: 'Session Id',
name: 'sessionId',
type: 'string',
-description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
+description:
+'If not specified, a random id will be used. Learn <a target="_blank" href="https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat">more</a>',
default: '',
additionalParams: true,
optional: true
@@ -64,49 +70,19 @@
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return initalizeMotorhead(nodeData, options)
}
-//@ts-ignore
-memoryMethods = {
-async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
-const motorhead = await initalizeMotorhead(nodeData, options)
-const sessionId = nodeData.inputs?.sessionId as string
-const chatId = options?.chatId as string
-options.logger.info(`Clearing Motorhead memory session ${sessionId ? sessionId : chatId}`)
-await motorhead.clear()
-options.logger.info(`Successfully cleared Motorhead memory session ${sessionId ? sessionId : chatId}`)
-},
-async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
-const memoryKey = nodeData.inputs?.memoryKey as string
-const motorhead = await initalizeMotorhead(nodeData, options)
-const key = memoryKey ?? 'chat_history'
-const memoryResult = await motorhead.loadMemoryVariables({})
-return getBufferString(memoryResult[key])
-}
-}
}
const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject): Promise<MotorheadMemory> => {
const memoryKey = nodeData.inputs?.memoryKey as string
const baseURL = nodeData.inputs?.baseURL as string
-const chatId = options?.chatId as string
-let isSessionIdUsingChatMessageId = false
-let sessionId = ''
-if (!nodeData.inputs?.sessionId && chatId) {
-isSessionIdUsingChatMessageId = true
-sessionId = chatId
-} else {
-sessionId = nodeData.inputs?.sessionId
-}
+const sessionId = nodeData.inputs?.sessionId as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
const clientId = getCredentialParam('clientId', credentialData, nodeData)
-let obj: MotorheadMemoryInput & MotorheadMemoryExtendedInput = {
+let obj: MotorheadMemoryInput = {
returnMessages: true,
-isSessionIdUsingChatMessageId,
sessionId,
memoryKey
}
@@ -132,23 +108,9 @@
return motorheadMemory
}
-interface MotorheadMemoryExtendedInput {
-isSessionIdUsingChatMessageId: boolean
-}
class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods {
-isSessionIdUsingChatMessageId? = false
-constructor(fields: MotorheadMemoryInput & MotorheadMemoryExtendedInput) {
+constructor(fields: MotorheadMemoryInput) {
super(fields)
-this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
-}
-async loadMemoryVariables(values: InputValues, overrideSessionId = ''): Promise<MemoryVariables> {
-if (overrideSessionId) {
-this.sessionId = overrideSessionId
-}
-return super.loadMemoryVariables({ values })
}
async saveContext(inputValues: InputValues, outputValues: OutputValues, overrideSessionId = ''): Promise<void> {
@@ -180,9 +142,33 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods {
async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
const id = overrideSessionId ?? this.sessionId
-const memoryVariables = await this.loadMemoryVariables({}, id)
-const baseMessages = memoryVariables[this.memoryKey]
-return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
+try {
+const resp = await this.caller.call(fetch, `${this.url}/sessions/${id}/memory`, {
+//@ts-ignore
+signal: this.timeout ? AbortSignal.timeout(this.timeout) : undefined,
+headers: this._getHeaders() as ICommonObject,
+method: 'GET'
+})
+const data = await resp.json()
+const rawStoredMessages: MotorheadMessage[] = data?.data?.messages ?? []
+const baseMessages = rawStoredMessages.reverse().map((message) => {
+const { content, role } = message
+if (role === 'Human') {
+return new HumanMessage(content)
+} else if (role === 'AI') {
+return new AIMessage(content)
+} else {
+// default to generic ChatMessage
+return new ChatMessage(content, role)
+}
+})
+return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
+} catch (error) {
+console.error('Error getting session: ', error)
+return []
+}
}
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {

View File

@@ -1,15 +1,9 @@
-import { INode, INodeData, INodeParams, ICommonObject, IMessage, MessageType, FlowiseMemory, MemoryMethods } from '../../../src/Interface'
-import {
-convertBaseMessagetoIMessage,
-getBaseClasses,
-getCredentialData,
-getCredentialParam,
-serializeChatHistory
-} from '../../../src/utils'
+import { Redis } from 'ioredis'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { RedisChatMessageHistory, RedisChatMessageHistoryInput } from 'langchain/stores/message/ioredis'
import { mapStoredMessageToChatMessage, BaseMessage, AIMessage, HumanMessage } from 'langchain/schema'
-import { Redis } from 'ioredis'
+import { INode, INodeData, INodeParams, ICommonObject, MessageType, IMessage, MemoryMethods, FlowiseMemory } from '../../../src/Interface'
+import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
class RedisBackedChatMemory_Memory implements INode {
label: string
@@ -44,7 +38,8 @@ class RedisBackedChatMemory_Memory implements INode {
label: 'Session Id',
name: 'sessionId',
type: 'string',
-description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
+description:
+'If not specified, a random id will be used. Learn <a target="_blank" href="https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat">more</a>',
default: '',
additionalParams: true,
optional: true
@@ -78,47 +73,19 @@
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return await initalizeRedis(nodeData, options)
}
-//@ts-ignore
-memoryMethods = {
-async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
-const redis = await initalizeRedis(nodeData, options)
-const sessionId = nodeData.inputs?.sessionId as string
-const chatId = options?.chatId as string
-options.logger.info(`Clearing Redis memory session ${sessionId ? sessionId : chatId}`)
-await redis.clear()
-options.logger.info(`Successfully cleared Redis memory session ${sessionId ? sessionId : chatId}`)
-},
-async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
-const memoryKey = nodeData.inputs?.memoryKey as string
-const redis = await initalizeRedis(nodeData, options)
-const key = memoryKey ?? 'chat_history'
-const memoryResult = await redis.loadMemoryVariables({})
-return serializeChatHistory(memoryResult[key])
-}
-}
}
const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const sessionTTL = nodeData.inputs?.sessionTTL as number
const memoryKey = nodeData.inputs?.memoryKey as string
+const sessionId = nodeData.inputs?.sessionId as string
const windowSize = nodeData.inputs?.windowSize as number
-const chatId = options?.chatId as string
-let isSessionIdUsingChatMessageId = false
-let sessionId = ''
-if (!nodeData.inputs?.sessionId && chatId) {
-isSessionIdUsingChatMessageId = true
-sessionId = chatId
-} else {
-sessionId = nodeData.inputs?.sessionId
-}
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const redisUrl = getCredentialParam('redisUrl', credentialData, nodeData)
let client: Redis
if (!redisUrl || redisUrl === '') {
const username = getCredentialParam('redisCacheUser', credentialData, nodeData)
const password = getCredentialParam('redisCachePwd', credentialData, nodeData)
@@ -153,7 +120,7 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const redisChatMessageHistory = new RedisChatMessageHistory(obj)
-redisChatMessageHistory.getMessages = async (): Promise<BaseMessage[]> => {
+/*redisChatMessageHistory.getMessages = async (): Promise<BaseMessage[]> => {
const rawStoredMessages = await client.lrange((redisChatMessageHistory as any).sessionId, windowSize ? -windowSize : 0, -1)
const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message))
return orderedMessages.map(mapStoredMessageToChatMessage)
@@ -169,44 +136,45 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
redisChatMessageHistory.clear = async (): Promise<void> => {
await client.del((redisChatMessageHistory as any).sessionId)
-}
+}*/
const memory = new BufferMemoryExtended({
memoryKey: memoryKey ?? 'chat_history',
chatHistory: redisChatMessageHistory,
-isSessionIdUsingChatMessageId,
sessionId,
+windowSize,
redisClient: client
})
return memory
}
interface BufferMemoryExtendedInput {
-isSessionIdUsingChatMessageId: boolean
redisClient: Redis
sessionId: string
+windowSize?: number
}
class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
-isSessionIdUsingChatMessageId? = false
sessionId = ''
redisClient: Redis
+windowSize?: number
constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) {
super(fields)
-this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
this.sessionId = fields.sessionId
this.redisClient = fields.redisClient
+this.windowSize = fields.windowSize
}
-async getChatMessages(overrideSessionId = '', returnBaseMessage = false): Promise<IMessage[] | BaseMessage[]> {
+async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise<IMessage[] | BaseMessage[]> {
if (!this.redisClient) return []
const id = overrideSessionId ?? this.sessionId
-const rawStoredMessages = await this.redisClient.lrange(id, 0, -1)
+const rawStoredMessages = await this.redisClient.lrange(id, this.windowSize ? this.windowSize * -1 : 0, -1)
const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message))
const baseMessages = orderedMessages.map(mapStoredMessageToChatMessage)
-return returnBaseMessage ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
+return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
}
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
@@ -236,10 +204,6 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
await this.redisClient.del(id)
await this.clear()
}
-async resumeMessages(): Promise<void> {
-return
-}
}
module.exports = { nodeClass: RedisBackedChatMemory_Memory }
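// Aside (hedged, based on standard ioredis lrange semantics): a negative start index counts from
// the tail, so the k-message window is now enforced at read time instead of via the removed
// getMessages() override. For example, with 10 stored messages and windowSize = 4:
// await redisClient.lrange('session-1', -4, -1) // -> only the last 4 serialized messages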

View File

@@ -3,13 +3,7 @@ import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { UpstashRedisChatMessageHistory } from 'langchain/stores/message/upstash_redis'
import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, StoredMessage, BaseMessage } from 'langchain/schema'
import { FlowiseMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface'
-import {
-convertBaseMessagetoIMessage,
-getBaseClasses,
-getCredentialData,
-getCredentialParam,
-serializeChatHistory
-} from '../../../src/utils'
+import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject } from '../../../src/Interface'
class UpstashRedisBackedChatMemory_Memory implements INode {
@@ -51,7 +45,8 @@ class UpstashRedisBackedChatMemory_Memory implements INode {
label: 'Session Id',
name: 'sessionId',
type: 'string',
-description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
+description:
+'If not specified, a random id will be used. Learn <a target="_blank" href="https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat">more</a>',
default: '',
additionalParams: true,
optional: true
@@ -70,40 +65,12 @@
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return initalizeUpstashRedis(nodeData, options)
}
-//@ts-ignore
-memoryMethods = {
-async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
-const redis = await initalizeUpstashRedis(nodeData, options)
-const sessionId = nodeData.inputs?.sessionId as string
-const chatId = options?.chatId as string
-options.logger.info(`Clearing Upstash Redis memory session ${sessionId ? sessionId : chatId}`)
-await redis.clear()
-options.logger.info(`Successfully cleared Upstash Redis memory session ${sessionId ? sessionId : chatId}`)
-},
-async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
-const redis = await initalizeUpstashRedis(nodeData, options)
-const key = 'chat_history'
-const memoryResult = await redis.loadMemoryVariables({})
-return serializeChatHistory(memoryResult[key])
-}
-}
}
const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const baseURL = nodeData.inputs?.baseURL as string
const sessionTTL = nodeData.inputs?.sessionTTL as string
-const chatId = options?.chatId as string
-let isSessionIdUsingChatMessageId = false
-let sessionId = ''
-if (!nodeData.inputs?.sessionId && chatId) {
-isSessionIdUsingChatMessageId = true
-sessionId = chatId
-} else {
-sessionId = nodeData.inputs?.sessionId
-}
+const sessionId = nodeData.inputs?.sessionId as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const upstashRestToken = getCredentialParam('upstashRestToken', credentialData, nodeData)
@@ -122,7 +89,6 @@ const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const memory = new BufferMemoryExtended({
memoryKey: 'chat_history',
chatHistory: redisChatMessageHistory,
-isSessionIdUsingChatMessageId,
sessionId,
redisClient: client
})
@@ -131,19 +97,16 @@ const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
}
interface BufferMemoryExtendedInput {
-isSessionIdUsingChatMessageId: boolean
redisClient: Redis
sessionId: string
}
class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
-isSessionIdUsingChatMessageId? = false
sessionId = ''
redisClient: Redis
constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) {
super(fields)
-this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
this.sessionId = fields.sessionId
this.redisClient = fields.redisClient
}
@@ -186,10 +149,6 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
await this.redisClient.del(id)
await this.clear()
}
-async resumeMessages(): Promise<void> {
-return
-}
}
module.exports = { nodeClass: UpstashRedisBackedChatMemory_Memory }

View File

@@ -2,7 +2,7 @@ import { IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ZepMemory, ZepMemoryInput } from 'langchain/memory/zep'
import { ICommonObject } from '../../../src'
-import { InputValues, MemoryVariables, OutputValues, getBufferString } from 'langchain/memory'
+import { InputValues, MemoryVariables, OutputValues } from 'langchain/memory'
import { BaseMessage } from 'langchain/schema'
class ZepMemory_Memory implements INode {
@@ -55,10 +55,9 @@ class ZepMemory_Memory implements INode {
label: 'Size',
name: 'k',
type: 'number',
-placeholder: '10',
+default: '10',
description: 'Window of size k to surface the last k back-and-forth to use as memory.',
-additionalParams: true,
-optional: true
+additionalParams: true
},
{
label: 'AI Prefix',
@@ -101,27 +100,6 @@ class ZepMemory_Memory implements INode {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return await initalizeZep(nodeData, options)
}
-//@ts-ignore
-memoryMethods = {
-async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
-const zep = await initalizeZep(nodeData, options)
-const sessionId = nodeData.inputs?.sessionId as string
-const chatId = options?.chatId as string
-options.logger.info(`Clearing Zep memory session ${sessionId ? sessionId : chatId}`)
-await zep.clear()
-options.logger.info(`Successfully cleared Zep memory session ${sessionId ? sessionId : chatId}`)
-},
-async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
-const memoryKey = nodeData.inputs?.memoryKey as string
-const aiPrefix = nodeData.inputs?.aiPrefix as string
-const humanPrefix = nodeData.inputs?.humanPrefix as string
-const zep = await initalizeZep(nodeData, options)
-const key = memoryKey ?? 'chat_history'
-const memoryResult = await zep.loadMemoryVariables({})
-return getBufferString(memoryResult[key], humanPrefix, aiPrefix)
-}
-}
}
const initalizeZep = async (nodeData: INodeData, options: ICommonObject): Promise<ZepMemory> => {
@@ -131,30 +109,19 @@ const initalizeZep = async (nodeData: INodeData, options: ICommonObject): Promise<ZepMemory> => {
const memoryKey = nodeData.inputs?.memoryKey as string
const inputKey = nodeData.inputs?.inputKey as string
const k = nodeData.inputs?.k as string
-const chatId = options?.chatId as string
-let isSessionIdUsingChatMessageId = false
-let sessionId = ''
-if (!nodeData.inputs?.sessionId && chatId) {
-isSessionIdUsingChatMessageId = true
-sessionId = chatId
-} else {
-sessionId = nodeData.inputs?.sessionId
-}
+const sessionId = nodeData.inputs?.sessionId as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
const obj: ZepMemoryInput & ZepMemoryExtendedInput = {
baseURL,
-sessionId,
aiPrefix,
humanPrefix,
returnMessages: true,
memoryKey,
inputKey,
-isSessionIdUsingChatMessageId,
+sessionId,
k: k ? parseInt(k, 10) : undefined
}
if (apiKey) obj.apiKey = apiKey
@@ -163,17 +130,14 @@
}
interface ZepMemoryExtendedInput {
-isSessionIdUsingChatMessageId: boolean
k?: number
}
class ZepMemoryExtended extends ZepMemory implements MemoryMethods {
-isSessionIdUsingChatMessageId? = false
lastN?: number
constructor(fields: ZepMemoryInput & ZepMemoryExtendedInput) {
super(fields)
-this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
this.lastN = fields.k
}

View File

@@ -60,7 +60,7 @@ class CustomTool_Tools implements INode {
}
}
-async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
+async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const selectedToolId = nodeData.inputs?.selectedTool as string
const customToolFunc = nodeData.inputs?.customToolFunc as string
@@ -99,11 +99,7 @@ class CustomTool_Tools implements INode {
}
}
-const flow = {
-chatId: options.chatId, // id is uppercase (I)
-chatflowId: options.chatflowid, // id is lowercase (i)
-input
-}
+const flow = { chatflowId: options.chatflowid }
let dynamicStructuredTool = new DynamicStructuredTool(obj)
dynamicStructuredTool.setVariables(variables)

View File

@@ -55,7 +55,12 @@ export class DynamicStructuredTool<
this.schema = fields.schema
}
-async call(arg: z.output<T>, configArg?: RunnableConfig | Callbacks, tags?: string[], overrideSessionId?: string): Promise<string> {
+async call(
+arg: z.output<T>,
+configArg?: RunnableConfig | Callbacks,
+tags?: string[],
+flowConfig?: { sessionId?: string; chatId?: string; input?: string }
+): Promise<string> {
const config = parseCallbackConfigArg(configArg)
if (config.runName === undefined) {
config.runName = this.name
@@ -86,7 +91,7 @@ export class DynamicStructuredTool<
)
let result
try {
-result = await this._call(parsed, runManager, overrideSessionId)
+result = await this._call(parsed, runManager, flowConfig)
} catch (e) {
await runManager?.handleToolError(e)
throw e
@@ -95,7 +100,11 @@ export class DynamicStructuredTool<
return result
}
-protected async _call(arg: z.output<T>, _?: CallbackManagerForToolRun, overrideSessionId?: string): Promise<string> {
+protected async _call(
+arg: z.output<T>,
+_?: CallbackManagerForToolRun,
+flowConfig?: { sessionId?: string; chatId?: string; input?: string }
+): Promise<string> {
let sandbox: any = {}
if (typeof arg === 'object' && Object.keys(arg).length) {
for (const item in arg) {
@@ -126,7 +135,7 @@ export class DynamicStructuredTool<
// inject flow properties
if (this.flowObj) {
-sandbox['$flow'] = { ...this.flowObj, sessionId: overrideSessionId }
+sandbox['$flow'] = { ...this.flowObj, ...flowConfig }
}
const defaultAllowBuiltInDep = [

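// Hedged usage sketch (a user-defined Custom Tool body, not code from this commit): inside the
// sandboxed function, the injected $flow object now carries the per-run flow config, e.g.
// const { sessionId, chatId, input } = $flow
// return `session=${sessionId} chat=${chatId} lastInput=${input}`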
View File

@@ -46,6 +46,7 @@
"dotenv": "^16.0.0",
"express": "^4.17.3",
"faiss-node": "^0.2.2",
+"fast-json-patch": "^3.1.1",
"form-data": "^4.0.0",
"google-auth-library": "^9.0.0",
"graphql": "^16.6.0",

View File

@ -108,10 +108,6 @@ export interface INode extends INodeProperties {
search: (nodeData: INodeData, options?: ICommonObject) => Promise<any> search: (nodeData: INodeData, options?: ICommonObject) => Promise<any>
delete: (nodeData: INodeData, options?: ICommonObject) => Promise<void> delete: (nodeData: INodeData, options?: ICommonObject) => Promise<void>
} }
memoryMethods?: {
clearSessionMemory: (nodeData: INodeData, options?: ICommonObject) => Promise<void>
getChatMessages: (nodeData: INodeData, options?: ICommonObject) => Promise<string>
}
init?(nodeData: INodeData, input: string, options?: ICommonObject): Promise<any> init?(nodeData: INodeData, input: string, options?: ICommonObject): Promise<any>
run?(nodeData: INodeData, input: string, options?: ICommonObject): Promise<string | ICommonObject> run?(nodeData: INodeData, input: string, options?: ICommonObject): Promise<string | ICommonObject>
} }
@ -204,29 +200,37 @@ import { BaseMessage } from 'langchain/schema'
import { BufferMemory, BufferWindowMemory, ConversationSummaryMemory } from 'langchain/memory' import { BufferMemory, BufferWindowMemory, ConversationSummaryMemory } from 'langchain/memory'
export interface MemoryMethods { export interface MemoryMethods {
getChatMessages(overrideSessionId?: string, returnBaseMessages?: boolean): Promise<IMessage[] | BaseMessage[]> getChatMessages(overrideSessionId?: string, returnBaseMessages?: boolean, prevHistory?: IMessage[]): Promise<IMessage[] | BaseMessage[]>
addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void> addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void>
clearChatMessages(overrideSessionId?: string): Promise<void> clearChatMessages(overrideSessionId?: string): Promise<void>
resumeMessages?(messages: IMessage[]): Promise<void>
} }
export abstract class FlowiseMemory extends BufferMemory implements MemoryMethods { export abstract class FlowiseMemory extends BufferMemory implements MemoryMethods {
abstract getChatMessages(overrideSessionId?: string, returnBaseMessages?: boolean): Promise<IMessage[] | BaseMessage[]> abstract getChatMessages(
overrideSessionId?: string,
returnBaseMessages?: boolean,
prevHistory?: IMessage[]
): Promise<IMessage[] | BaseMessage[]>
abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void> abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void>
abstract clearChatMessages(overrideSessionId?: string): Promise<void> abstract clearChatMessages(overrideSessionId?: string): Promise<void>
abstract resumeMessages(messages: IMessage[]): Promise<void>
} }
export abstract class FlowiseWindowMemory extends BufferWindowMemory implements MemoryMethods { export abstract class FlowiseWindowMemory extends BufferWindowMemory implements MemoryMethods {
abstract getChatMessages(overrideSessionId?: string, returnBaseMessages?: boolean): Promise<IMessage[] | BaseMessage[]> abstract getChatMessages(
overrideSessionId?: string,
returnBaseMessages?: boolean,
prevHistory?: IMessage[]
): Promise<IMessage[] | BaseMessage[]>
abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void> abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void>
abstract clearChatMessages(overrideSessionId?: string): Promise<void> abstract clearChatMessages(overrideSessionId?: string): Promise<void>
abstract resumeMessages(messages: IMessage[]): Promise<void>
} }
export abstract class FlowiseSummaryMemory extends ConversationSummaryMemory implements MemoryMethods { export abstract class FlowiseSummaryMemory extends ConversationSummaryMemory implements MemoryMethods {
abstract getChatMessages(overrideSessionId?: string, returnBaseMessages?: boolean): Promise<IMessage[] | BaseMessage[]> abstract getChatMessages(
overrideSessionId?: string,
returnBaseMessages?: boolean,
prevHistory?: IMessage[]
): Promise<IMessage[] | BaseMessage[]>
abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void> abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void>
abstract clearChatMessages(overrideSessionId?: string): Promise<void> abstract clearChatMessages(overrideSessionId?: string): Promise<void>
abstract resumeMessages(messages: IMessage[]): Promise<void>
} }
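For orientation, a minimal sketch of a concrete MemoryMethods implementation under the new signatures. The in-memory store is an assumption for illustration (real nodes back this with Redis, Zep, a database, etc.), and the IMessage shape is assumed here as { message, type }:

import { AIMessage, BaseMessage, HumanMessage } from 'langchain/schema'

type MessageType = 'apiMessage' | 'userMessage'
interface IMessage { message: string; type: MessageType }

class InMemorySessionMemory {
    sessionId = 'default-session'
    private store: Record<string, IMessage[]> = {}

    // prevHistory lets a caller prepend messages that are not yet persisted
    async getChatMessages(overrideSessionId?: string, returnBaseMessages?: boolean, prevHistory?: IMessage[]): Promise<IMessage[] | BaseMessage[]> {
        const id = overrideSessionId ?? this.sessionId
        const history = [...(prevHistory ?? []), ...(this.store[id] ?? [])]
        if (!returnBaseMessages) return history
        return history.map((m) => (m.type === 'userMessage' ? new HumanMessage(m.message) : new AIMessage(m.message)))
    }

    async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void> {
        const id = overrideSessionId ?? this.sessionId
        this.store[id] = [...(this.store[id] ?? []), ...msgArray.map((m) => ({ message: m.text, type: m.type }))]
    }

    async clearChatMessages(overrideSessionId?: string): Promise<void> {
        delete this.store[overrideSessionId ?? this.sessionId]
    }

    // resumeMessages re-seeds a session from previously persisted history
    async resumeMessages(messages: IMessage[]): Promise<void> {
        this.store[this.sessionId] = [...messages]
    }
}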
View File
@ -0,0 +1,615 @@
import { AgentExecutorInput, BaseSingleActionAgent, BaseMultiActionAgent, RunnableAgent, StoppingMethod } from 'langchain/agents'
import { ChainValues, AgentStep, AgentFinish, AgentAction, BaseMessage, FunctionMessage, AIMessage } from 'langchain/schema'
import { OutputParserException } from 'langchain/schema/output_parser'
import { CallbackManager, CallbackManagerForChainRun, Callbacks } from 'langchain/callbacks'
import { ToolInputParsingException, Tool } from '@langchain/core/tools'
import { Runnable } from 'langchain/schema/runnable'
import { BaseChain, SerializedLLMChain } from 'langchain/chains'
import { Serializable } from '@langchain/core/load/serializable'
type AgentExecutorOutput = ChainValues
interface AgentExecutorIteratorInput {
agentExecutor: AgentExecutor
inputs: Record<string, string>
callbacks?: Callbacks
tags?: string[]
metadata?: Record<string, unknown>
runName?: string
runManager?: CallbackManagerForChainRun
}
// TODO: stream tools back
export class AgentExecutorIterator extends Serializable implements AgentExecutorIteratorInput {
lc_namespace = ['langchain', 'agents', 'executor_iterator']
agentExecutor: AgentExecutor
inputs: Record<string, string>
callbacks: Callbacks
tags: string[] | undefined
metadata: Record<string, unknown> | undefined
runName: string | undefined
private _finalOutputs: Record<string, unknown> | undefined
get finalOutputs(): Record<string, unknown> | undefined {
return this._finalOutputs
}
/** Setter for the final outputs; implemented as an async method because preparing outputs is asynchronous. */
async setFinalOutputs(value: Record<string, unknown> | undefined) {
this._finalOutputs = undefined
if (value) {
const preparedOutputs: Record<string, unknown> = await this.agentExecutor.prepOutputs(this.inputs, value, true)
this._finalOutputs = preparedOutputs
}
}
runManager: CallbackManagerForChainRun | undefined
intermediateSteps: AgentStep[] = []
iterations = 0
get nameToToolMap(): Record<string, Tool> {
const toolMap = this.agentExecutor.tools.map((tool) => ({
[tool.name]: tool
}))
return Object.assign({}, ...toolMap)
}
constructor(fields: AgentExecutorIteratorInput) {
super(fields)
this.agentExecutor = fields.agentExecutor
this.inputs = fields.inputs
// Carry user-supplied callbacks through so onFirstStep() can configure them
this.callbacks = fields.callbacks
this.tags = fields.tags
this.metadata = fields.metadata
this.runName = fields.runName
this.runManager = fields.runManager
}
/**
* Reset the iterator to its initial state, clearing intermediate steps,
* iterations, and the final output.
*/
reset(): void {
this.intermediateSteps = []
this.iterations = 0
this._finalOutputs = undefined
}
updateIterations(): void {
this.iterations += 1
}
async *streamIterator() {
this.reset()
// Loop to handle iteration
while (true) {
try {
if (this.iterations === 0) {
await this.onFirstStep()
}
const result = await this._callNext()
yield result
} catch (e: any) {
if ('message' in e && e.message.startsWith('Final outputs already reached: ')) {
if (!this.finalOutputs) {
throw e
}
return this.finalOutputs
}
if (this.runManager) {
await this.runManager.handleChainError(e)
}
throw e
}
}
}
/**
* Perform any necessary setup for the first step
* of the asynchronous iterator.
*/
async onFirstStep(): Promise<void> {
if (this.iterations === 0) {
const callbackManager = await CallbackManager.configure(
this.callbacks,
this.agentExecutor.callbacks,
this.tags,
this.agentExecutor.tags,
this.metadata,
this.agentExecutor.metadata,
{
verbose: this.agentExecutor.verbose
}
)
this.runManager = await callbackManager?.handleChainStart(
this.agentExecutor.toJSON(),
this.inputs,
undefined,
undefined,
this.tags,
this.metadata,
this.runName
)
}
}
/**
* Execute the next step in the chain using the
* AgentExecutor's _takeNextStep method.
*/
async _executeNextStep(runManager?: CallbackManagerForChainRun): Promise<AgentFinish | AgentStep[]> {
return this.agentExecutor._takeNextStep(this.nameToToolMap, this.inputs, this.intermediateSteps, runManager)
}
/**
* Process the output of the next step,
* handling AgentFinish and tool return cases.
*/
async _processNextStepOutput(
nextStepOutput: AgentFinish | AgentStep[],
runManager?: CallbackManagerForChainRun
): Promise<Record<string, string | AgentStep[]>> {
if ('returnValues' in nextStepOutput) {
const output = await this.agentExecutor._return(nextStepOutput as AgentFinish, this.intermediateSteps, runManager)
if (this.runManager) {
await this.runManager.handleChainEnd(output)
}
await this.setFinalOutputs(output)
return output
}
this.intermediateSteps = this.intermediateSteps.concat(nextStepOutput as AgentStep[])
let output: Record<string, string | AgentStep[]> = {}
if (Array.isArray(nextStepOutput) && nextStepOutput.length === 1) {
const nextStep = nextStepOutput[0]
const toolReturn = await this.agentExecutor._getToolReturn(nextStep)
if (toolReturn) {
output = await this.agentExecutor._return(toolReturn, this.intermediateSteps, runManager)
if (this.runManager) {
await this.runManager.handleChainEnd(output)
}
await this.setFinalOutputs(output)
}
}
output = { intermediateSteps: nextStepOutput as AgentStep[] }
return output
}
async _stop(): Promise<Record<string, unknown>> {
const output = await this.agentExecutor.agent.returnStoppedResponse(
this.agentExecutor.earlyStoppingMethod,
this.intermediateSteps,
this.inputs
)
const returnedOutput = await this.agentExecutor._return(output, this.intermediateSteps, this.runManager)
await this.setFinalOutputs(returnedOutput)
return returnedOutput
}
async _callNext(): Promise<Record<string, unknown>> {
// Final output already reached: signal StopIteration (final output)
if (this.finalOutputs) {
throw new Error(`Final outputs already reached: ${JSON.stringify(this.finalOutputs, null, 2)}`)
}
// Timeout/max iterations reached: signal StopIteration (stopped response)
if (!this.agentExecutor.shouldContinueGetter(this.iterations)) {
return this._stop()
}
const nextStepOutput = await this._executeNextStep(this.runManager)
const output = await this._processNextStepOutput(nextStepOutput, this.runManager)
this.updateIterations()
return output
}
}
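// Usage sketch (illustrative, not part of this file): the iterator above backs
// AgentExecutor._streamIterator below, but it can also be driven directly,
// assuming `executor` is an already-constructed AgentExecutor:
//
//   const iterator = new AgentExecutorIterator({ agentExecutor: executor, inputs: { input: 'hi' } })
//   for await (const step of iterator.streamIterator()) {
//       if ('intermediateSteps' in step) console.log('intermediate:', step.intermediateSteps)
//       else console.log('final:', step)
//   }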
export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
static lc_name() {
return 'AgentExecutor'
}
get lc_namespace() {
return ['langchain', 'agents', 'executor']
}
agent: BaseSingleActionAgent | BaseMultiActionAgent
tools: this['agent']['ToolType'][]
returnIntermediateSteps = false
maxIterations?: number = 15
earlyStoppingMethod: StoppingMethod = 'force'
sessionId?: string
chatId?: string
input?: string
/**
* How to handle errors raised by the agent's output parser.
* Defaults to `false`, which rethrows the error.
* If `true`, the error is sent back to the LLM as an observation.
* If a string, that string is sent to the LLM as the observation.
* If a function, it is called with the exception as its argument, and
* its return value is passed to the agent as the observation.
*/
handleParsingErrors: boolean | string | ((e: OutputParserException | ToolInputParsingException) => string) = false
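// Illustrative example (not in the source): a function handler maps the exception to an observation,
// e.g. handleParsingErrors: (e) => `Could not parse agent output: ${e.message}`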
get inputKeys() {
return this.agent.inputKeys
}
get outputKeys() {
return this.agent.returnValues
}
constructor(input: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string }) {
let agent: BaseSingleActionAgent | BaseMultiActionAgent
if (Runnable.isRunnable(input.agent)) {
agent = new RunnableAgent({ runnable: input.agent })
} else {
agent = input.agent
}
super(input)
this.agent = agent
this.tools = input.tools
this.handleParsingErrors = input.handleParsingErrors ?? this.handleParsingErrors
/* Getting rid of this because RunnableAgent doesn't allow returnDirect
if (this.agent._agentActionType() === "multi") {
for (const tool of this.tools) {
if (tool.returnDirect) {
throw new Error(
`Tool with return direct ${tool.name} not supported for multi-action agent.`
);
}
}
}*/
this.returnIntermediateSteps = input.returnIntermediateSteps ?? this.returnIntermediateSteps
this.maxIterations = input.maxIterations ?? this.maxIterations
this.earlyStoppingMethod = input.earlyStoppingMethod ?? this.earlyStoppingMethod
this.sessionId = input.sessionId
this.chatId = input.chatId
this.input = input.input
}
static fromAgentAndTools(fields: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string }): AgentExecutor {
const newInstance = new AgentExecutor(fields)
if (fields.sessionId) newInstance.sessionId = fields.sessionId
if (fields.chatId) newInstance.chatId = fields.chatId
if (fields.input) newInstance.input = fields.input
return newInstance
}
get shouldContinueGetter() {
return this.shouldContinue.bind(this)
}
/**
* Method that checks if the agent execution should continue based on the
* number of iterations.
* @param iterations The current number of iterations.
* @returns A boolean indicating whether the agent execution should continue.
*/
private shouldContinue(iterations: number): boolean {
return this.maxIterations === undefined || iterations < this.maxIterations
}
async _call(inputs: ChainValues, runManager?: CallbackManagerForChainRun): Promise<AgentExecutorOutput> {
const toolsByName = Object.fromEntries(this.tools.map((t) => [t.name.toLowerCase(), t]))
const steps: AgentStep[] = []
let iterations = 0
const getOutput = async (finishStep: AgentFinish): Promise<AgentExecutorOutput> => {
const { returnValues } = finishStep
const additional = await this.agent.prepareForOutput(returnValues, steps)
if (this.returnIntermediateSteps) {
return { ...returnValues, intermediateSteps: steps, ...additional }
}
await runManager?.handleAgentEnd(finishStep)
return { ...returnValues, ...additional }
}
while (this.shouldContinue(iterations)) {
let output
try {
output = await this.agent.plan(steps, inputs, runManager?.getChild())
} catch (e) {
if (e instanceof OutputParserException) {
let observation
let text = e.message
if (this.handleParsingErrors === true) {
if (e.sendToLLM) {
observation = e.observation
text = e.llmOutput ?? ''
} else {
observation = 'Invalid or incomplete response'
}
} else if (typeof this.handleParsingErrors === 'string') {
observation = this.handleParsingErrors
} else if (typeof this.handleParsingErrors === 'function') {
observation = this.handleParsingErrors(e)
} else {
throw e
}
output = {
tool: '_Exception',
toolInput: observation,
log: text
} as AgentAction
} else {
throw e
}
}
// Check if the agent has finished
if ('returnValues' in output) {
return getOutput(output)
}
let actions: AgentAction[]
if (Array.isArray(output)) {
actions = output as AgentAction[]
} else {
actions = [output as AgentAction]
}
const newSteps = await Promise.all(
actions.map(async (action) => {
await runManager?.handleAgentAction(action)
const tool = action.tool === '_Exception' ? new ExceptionTool() : toolsByName[action.tool?.toLowerCase()]
let observation
try {
/* Here we override the Tool's call method to pass sessionId, chatId and input as an extra parameter.
* Tool Call Parameters:
* - arg: z.output<T>
* - configArg?: RunnableConfig | Callbacks
* - tags?: string[]
* - flowConfig?: { sessionId?: string, chatId?: string, input?: string }
*/
observation = tool
? // @ts-ignore
await tool.call(action.toolInput, runManager?.getChild(), undefined, {
sessionId: this.sessionId,
chatId: this.chatId,
input: this.input
})
: `${action.tool} is not a valid tool, try another one.`
} catch (e) {
if (e instanceof ToolInputParsingException) {
if (this.handleParsingErrors === true) {
observation = 'Invalid or incomplete tool input. Please try again.'
} else if (typeof this.handleParsingErrors === 'string') {
observation = this.handleParsingErrors
} else if (typeof this.handleParsingErrors === 'function') {
observation = this.handleParsingErrors(e)
} else {
throw e
}
observation = await new ExceptionTool().call(observation, runManager?.getChild())
return { action, observation: observation ?? '' }
}
}
return { action, observation: observation ?? '' }
})
)
steps.push(...newSteps)
const lastStep = steps[steps.length - 1]
const lastTool = toolsByName[lastStep.action.tool?.toLowerCase()]
if (lastTool?.returnDirect) {
return getOutput({
returnValues: { [this.agent.returnValues[0]]: lastStep.observation },
log: ''
})
}
iterations += 1
}
const finish = await this.agent.returnStoppedResponse(this.earlyStoppingMethod, steps, inputs)
return getOutput(finish)
}
async _takeNextStep(
nameToolMap: Record<string, Tool>,
inputs: ChainValues,
intermediateSteps: AgentStep[],
runManager?: CallbackManagerForChainRun
): Promise<AgentFinish | AgentStep[]> {
let output
try {
output = await this.agent.plan(intermediateSteps, inputs, runManager?.getChild())
} catch (e) {
if (e instanceof OutputParserException) {
let observation
let text = e.message
if (this.handleParsingErrors === true) {
if (e.sendToLLM) {
observation = e.observation
text = e.llmOutput ?? ''
} else {
observation = 'Invalid or incomplete response'
}
} else if (typeof this.handleParsingErrors === 'string') {
observation = this.handleParsingErrors
} else if (typeof this.handleParsingErrors === 'function') {
observation = this.handleParsingErrors(e)
} else {
throw e
}
output = {
tool: '_Exception',
toolInput: observation,
log: text
} as AgentAction
} else {
throw e
}
}
if ('returnValues' in output) {
return output
}
let actions: AgentAction[]
if (Array.isArray(output)) {
actions = output as AgentAction[]
} else {
actions = [output as AgentAction]
}
const result: AgentStep[] = []
for (const agentAction of actions) {
let observation = ''
if (runManager) {
await runManager?.handleAgentAction(agentAction)
}
if (agentAction.tool in nameToolMap) {
const tool = nameToolMap[agentAction.tool]
try {
/* Here we override the Tool's call method to pass sessionId, chatId and input as an extra parameter.
* Tool Call Parameters:
* - arg: z.output<T>
* - configArg?: RunnableConfig | Callbacks
* - tags?: string[]
* - flowConfig?: { sessionId?: string, chatId?: string, input?: string }
*/
// @ts-ignore
observation = await tool.call(agentAction.toolInput, runManager?.getChild(), undefined, {
sessionId: this.sessionId,
chatId: this.chatId,
input: this.input
})
} catch (e) {
if (e instanceof ToolInputParsingException) {
if (this.handleParsingErrors === true) {
observation = 'Invalid or incomplete tool input. Please try again.'
} else if (typeof this.handleParsingErrors === 'string') {
observation = this.handleParsingErrors
} else if (typeof this.handleParsingErrors === 'function') {
observation = this.handleParsingErrors(e)
} else {
throw e
}
observation = await new ExceptionTool().call(observation, runManager?.getChild())
}
}
} else {
observation = `${agentAction.tool} is not a valid tool, try another available tool: ${Object.keys(nameToolMap).join(', ')}`
}
result.push({
action: agentAction,
observation
})
}
return result
}
async _return(
output: AgentFinish,
intermediateSteps: AgentStep[],
runManager?: CallbackManagerForChainRun
): Promise<AgentExecutorOutput> {
if (runManager) {
await runManager.handleAgentEnd(output)
}
const finalOutput: Record<string, unknown> = output.returnValues
if (this.returnIntermediateSteps) {
finalOutput.intermediateSteps = intermediateSteps
}
return finalOutput
}
async _getToolReturn(nextStepOutput: AgentStep): Promise<AgentFinish | null> {
const { action, observation } = nextStepOutput
const nameToolMap = Object.fromEntries(this.tools.map((t) => [t.name.toLowerCase(), t]))
const [returnValueKey = 'output'] = this.agent.returnValues
// Invalid tools won't be in the map, so we return null.
if (action.tool in nameToolMap) {
if (nameToolMap[action.tool].returnDirect) {
return {
returnValues: { [returnValueKey]: observation },
log: ''
}
}
}
return null
}
_returnStoppedResponse(earlyStoppingMethod: StoppingMethod) {
if (earlyStoppingMethod === 'force') {
return {
returnValues: {
output: 'Agent stopped due to iteration limit or time limit.'
},
log: ''
} as AgentFinish
}
throw new Error(`Got unsupported early_stopping_method: ${earlyStoppingMethod}`)
}
async *_streamIterator(inputs: Record<string, any>): AsyncGenerator<ChainValues> {
const agentExecutorIterator = new AgentExecutorIterator({
inputs,
agentExecutor: this,
metadata: this.metadata,
tags: this.tags,
callbacks: this.callbacks
})
const iterator = agentExecutorIterator.streamIterator()
for await (const step of iterator) {
if (!step) {
continue
}
yield step
}
}
_chainType() {
return 'agent_executor' as const
}
serialize(): SerializedLLMChain {
throw new Error('Cannot serialize an AgentExecutor')
}
}
class ExceptionTool extends Tool {
name = '_Exception'
description = 'Exception tool'
async _call(query: string) {
return query
}
}
export const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] =>
steps.flatMap(({ action, observation }) => {
if ('messageLog' in action && action.messageLog !== undefined) {
const log = action.messageLog as BaseMessage[]
return log.concat(new FunctionMessage(observation, action.tool))
} else {
return [new AIMessage(action.log)]
}
})
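For context, a hedged sketch of how this executor pairs with a runnable agent. The prompt layout, the toy output parser, and the empty tool list are illustrative assumptions, not code from this commit; AgentExecutor and formatAgentSteps are the exports above:

import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { RunnableSequence } from 'langchain/schema/runnable'
import { ChatOpenAI } from 'langchain/chat_models/openai'
import { AgentFinish, AgentStep, BaseMessage } from 'langchain/schema'

const model = new ChatOpenAI({ temperature: 0 })
const prompt = ChatPromptTemplate.fromMessages([
    ['system', 'You are a helpful assistant.'],
    new MessagesPlaceholder('chat_history'),
    ['human', '{input}'],
    new MessagesPlaceholder('agent_scratchpad')
])

const runnableAgent = RunnableSequence.from([
    {
        input: (i: { input: string; steps: AgentStep[] }) => i.input,
        // formatAgentSteps (above) turns executed steps into scratchpad messages
        agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
        chat_history: async (): Promise<BaseMessage[]> => [] // filled from memory in practice
    },
    prompt,
    model,
    // Toy parser: always finish with the raw model text; a real agent emits AgentAction | AgentFinish
    (message: BaseMessage): AgentFinish => ({ returnValues: { output: message.content }, log: String(message.content) })
])

const executor = AgentExecutor.fromAgentAndTools({
    agent: runnableAgent,
    tools: [],
    sessionId: 'session-1',
    input: 'What is 2 + 2?'
})
// BaseChain is itself a Runnable, so (inside an async context):
// const result = await executor.invoke({ input: 'What is 2 + 2?', chat_history: [] })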
View File
@ -936,7 +936,7 @@
"id": "conversationalAgent_0-input-tools-Tool" "id": "conversationalAgent_0-input-tools-Tool"
}, },
{ {
"label": "Language Model", "label": "Chat Model",
"name": "model", "name": "model",
"type": "BaseChatModel", "type": "BaseChatModel",
"id": "conversationalAgent_0-input-model-BaseChatModel" "id": "conversationalAgent_0-input-model-BaseChatModel"
View File
@ -13,7 +13,7 @@
"data": { "data": {
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"version": 1, "version": 2,
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
@ -28,47 +28,36 @@
"id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
}, },
{ {
"label": "System Message", "label": "Rephrase Prompt",
"name": "systemMessagePrompt", "name": "rephrasePrompt",
"type": "string", "type": "string",
"description": "Using previous chat history, rephrase question into a standalone question",
"warning": "Prompt must include input variables: {chat_history} and {question}",
"rows": 4, "rows": 4,
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.", "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string"
}, },
{ {
"label": "Chain Option", "label": "Response Prompt",
"name": "chainOption", "name": "responsePrompt",
"type": "options", "type": "string",
"options": [ "description": "Taking the rephrased question, search for answer from the provided context",
{ "warning": "Prompt must include input variable: {context}",
"label": "MapReduceDocumentsChain", "rows": 4,
"name": "map_reduce",
"description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time"
},
{
"label": "RefineDocumentsChain",
"name": "refine",
"description": "Suitable for QA tasks over a large number of documents."
},
{
"label": "StuffDocumentsChain",
"name": "stuff",
"description": "Suitable for QA tasks over a small number of documents."
}
],
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"id": "conversationalRetrievalQAChain_0-input-chainOption-options" "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.",
"id": "conversationalRetrievalQAChain_0-input-responsePrompt-string"
} }
], ],
"inputAnchors": [ "inputAnchors": [
{ {
"label": "Language Model", "label": "Chat Model",
"name": "model", "name": "model",
"type": "BaseLanguageModel", "type": "BaseChatModel",
"id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel"
}, },
{ {
"label": "Vector Store Retriever", "label": "Vector Store Retriever",
@ -89,9 +78,8 @@
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}", "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}",
"memory": "", "memory": "",
"returnSourceDocuments": "", "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"systemMessagePrompt": "", "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
"chainOption": ""
}, },
"outputAnchors": [ "outputAnchors": [
{ {
@ -625,9 +613,9 @@
"source": "chatOpenAI_0", "source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"target": "conversationalRetrievalQAChain_0", "target": "conversationalRetrievalQAChain_0",
"targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"type": "buttonedge", "type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"data": { "data": {
"label": "" "label": ""
} }
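The two new prompts above define the chain's two stages: rephrase the follow-up into a standalone question, then answer it from the retrieved context. A minimal sketch of that pattern with runnables; the model choice and the abridged response prompt are illustrative, not from this flow:

import { PromptTemplate } from '@langchain/core/prompts'
import { StringOutputParser } from 'langchain/schema/output_parser'
import { RunnableSequence } from 'langchain/schema/runnable'
import { ChatOpenAI } from 'langchain/chat_models/openai'

const model = new ChatOpenAI({ temperature: 0 })

// Stage 1: rephrase using {chat_history} and {question} (the default rephrasePrompt above)
const rephrase = RunnableSequence.from([
    PromptTemplate.fromTemplate(
        'Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:'
    ),
    model,
    new StringOutputParser()
])

// Stage 2: answer the standalone question from the retrieved {context} (abridged responsePrompt)
const respond = RunnableSequence.from([
    PromptTemplate.fromTemplate(
        'Using the provided context, answer the user\'s question. If nothing in the context is relevant, just say "Hmm, I\'m not sure."\n------------\n{context}\n------------\nQuestion: {question}'
    ),
    model,
    new StringOutputParser()
])

// Inside an async context (docsAsText is a placeholder for the retriever's output):
// const standalone = await rephrase.invoke({ chat_history: 'Human: hi\nAI: hello', question: 'What did I just say?' })
// const answer = await respond.invoke({ context: docsAsText, question: standalone })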
View File
@ -90,7 +90,7 @@
], ],
"inputAnchors": [ "inputAnchors": [
{ {
"label": "Language Model", "label": "Chat Model",
"name": "model", "name": "model",
"type": "BaseChatModel", "type": "BaseChatModel",
"id": "conversationChain_0-input-model-BaseChatModel" "id": "conversationChain_0-input-model-BaseChatModel"
View File
@ -354,7 +354,7 @@
"id": "conversationalAgent_0-input-tools-Tool" "id": "conversationalAgent_0-input-tools-Tool"
}, },
{ {
"label": "Language Model", "label": "Chat Model",
"name": "model", "name": "model",
"type": "BaseChatModel", "type": "BaseChatModel",
"id": "conversationalAgent_0-input-model-BaseChatModel" "id": "conversationalAgent_0-input-model-BaseChatModel"
View File
@ -249,10 +249,10 @@
"data": { "data": {
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"version": 1, "version": 2,
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
"category": "Chains", "category": "Chains",
"description": "Document QA - built on RetrievalQAChain to provide a chat history component", "description": "Document QA - built on RetrievalQAChain to provide a chat history component",
"inputParams": [ "inputParams": [
@ -264,47 +264,36 @@
"id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
}, },
{ {
"label": "System Message", "label": "Rephrase Prompt",
"name": "systemMessagePrompt", "name": "rephrasePrompt",
"type": "string", "type": "string",
"description": "Using previous chat history, rephrase question into a standalone question",
"warning": "Prompt must include input variables: {chat_history} and {question}",
"rows": 4, "rows": 4,
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.", "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string"
}, },
{ {
"label": "Chain Option", "label": "Response Prompt",
"name": "chainOption", "name": "responsePrompt",
"type": "options", "type": "string",
"options": [ "description": "Taking the rephrased question, search for answer from the provided context",
{ "warning": "Prompt must include input variable: {context}",
"label": "MapReduceDocumentsChain", "rows": 4,
"name": "map_reduce",
"description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time"
},
{
"label": "RefineDocumentsChain",
"name": "refine",
"description": "Suitable for QA tasks over a large number of documents."
},
{
"label": "StuffDocumentsChain",
"name": "stuff",
"description": "Suitable for QA tasks over a small number of documents."
}
],
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"id": "conversationalRetrievalQAChain_0-input-chainOption-options" "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.",
"id": "conversationalRetrievalQAChain_0-input-responsePrompt-string"
} }
], ],
"inputAnchors": [ "inputAnchors": [
{ {
"label": "Language Model", "label": "Chat Model",
"name": "model", "name": "model",
"type": "BaseLanguageModel", "type": "BaseChatModel",
"id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel"
}, },
{ {
"label": "Vector Store Retriever", "label": "Vector Store Retriever",
@ -325,16 +314,15 @@
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{pinecone_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}",
"memory": "", "memory": "",
"returnSourceDocuments": "", "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"systemMessagePrompt": "", "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
"chainOption": ""
}, },
"outputAnchors": [ "outputAnchors": [
{ {
"id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain", "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable",
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"label": "ConversationalRetrievalQAChain", "label": "ConversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain | BaseChain" "type": "ConversationalRetrievalQAChain | BaseChain | Runnable"
} }
], ],
"outputs": {}, "outputs": {},
@ -704,9 +692,9 @@
"source": "chatOpenAI_0", "source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"target": "conversationalRetrievalQAChain_0", "target": "conversationalRetrievalQAChain_0",
"targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"type": "buttonedge", "type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"data": { "data": {
"label": "" "label": ""
} }
View File
@ -156,9 +156,9 @@
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"version": 1, "version": 2,
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
"category": "Chains", "category": "Chains",
"description": "Document QA - built on RetrievalQAChain to provide a chat history component", "description": "Document QA - built on RetrievalQAChain to provide a chat history component",
"inputParams": [ "inputParams": [
@ -170,47 +170,36 @@
"id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
}, },
{ {
"label": "System Message", "label": "Rephrase Prompt",
"name": "systemMessagePrompt", "name": "rephrasePrompt",
"type": "string", "type": "string",
"description": "Using previous chat history, rephrase question into a standalone question",
"warning": "Prompt must include input variables: {chat_history} and {question}",
"rows": 4, "rows": 4,
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.", "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string"
}, },
{ {
"label": "Chain Option", "label": "Response Prompt",
"name": "chainOption", "name": "responsePrompt",
"type": "options", "type": "string",
"options": [ "description": "Taking the rephrased question, search for answer from the provided context",
{ "warning": "Prompt must include input variable: {context}",
"label": "MapReduceDocumentsChain", "rows": 4,
"name": "map_reduce",
"description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time"
},
{
"label": "RefineDocumentsChain",
"name": "refine",
"description": "Suitable for QA tasks over a large number of documents."
},
{
"label": "StuffDocumentsChain",
"name": "stuff",
"description": "Suitable for QA tasks over a small number of documents."
}
],
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"id": "conversationalRetrievalQAChain_0-input-chainOption-options" "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.",
"id": "conversationalRetrievalQAChain_0-input-responsePrompt-string"
} }
], ],
"inputAnchors": [ "inputAnchors": [
{ {
"label": "Language Model", "label": "Chat Model",
"name": "model", "name": "model",
"type": "BaseLanguageModel", "type": "BaseChatModel",
"id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel"
}, },
{ {
"label": "Vector Store Retriever", "label": "Vector Store Retriever",
@ -232,15 +221,15 @@
"vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}", "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}",
"memory": "", "memory": "",
"returnSourceDocuments": true, "returnSourceDocuments": true,
"systemMessagePrompt": "", "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"chainOption": "" "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
}, },
"outputAnchors": [ "outputAnchors": [
{ {
"id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain", "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable",
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"label": "ConversationalRetrievalQAChain", "label": "ConversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain | BaseChain" "type": "ConversationalRetrievalQAChain | BaseChain | Runnable"
} }
], ],
"outputs": {}, "outputs": {},
@ -668,9 +657,9 @@
"source": "chatOpenAI_0", "source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
"target": "conversationalRetrievalQAChain_0", "target": "conversationalRetrievalQAChain_0",
"targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"type": "buttonedge", "type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"data": { "data": {
"label": "" "label": ""
} }
View File
@ -83,10 +83,10 @@
"data": { "data": {
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"version": 1, "version": 2,
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "BaseLangChain"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
"category": "Chains", "category": "Chains",
"description": "Document QA - built on RetrievalQAChain to provide a chat history component", "description": "Document QA - built on RetrievalQAChain to provide a chat history component",
"inputParams": [ "inputParams": [
@ -98,47 +98,36 @@
"id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
}, },
{ {
"label": "System Message", "label": "Rephrase Prompt",
"name": "systemMessagePrompt", "name": "rephrasePrompt",
"type": "string", "type": "string",
"description": "Using previous chat history, rephrase question into a standalone question",
"warning": "Prompt must include input variables: {chat_history} and {question}",
"rows": 4, "rows": 4,
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.", "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string"
}, },
{ {
"label": "Chain Option", "label": "Response Prompt",
"name": "chainOption", "name": "responsePrompt",
"type": "options", "type": "string",
"options": [ "description": "Taking the rephrased question, search for answer from the provided context",
{ "warning": "Prompt must include input variable: {context}",
"label": "MapReduceDocumentsChain", "rows": 4,
"name": "map_reduce",
"description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time"
},
{
"label": "RefineDocumentsChain",
"name": "refine",
"description": "Suitable for QA tasks over a large number of documents."
},
{
"label": "StuffDocumentsChain",
"name": "stuff",
"description": "Suitable for QA tasks over a small number of documents."
}
],
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"id": "conversationalRetrievalQAChain_0-input-chainOption-options" "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.",
"id": "conversationalRetrievalQAChain_0-input-responsePrompt-string"
} }
], ],
"inputAnchors": [ "inputAnchors": [
{ {
"label": "Language Model", "label": "Chat Model",
"name": "model", "name": "model",
"type": "BaseLanguageModel", "type": "BaseChatModel",
"id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel"
}, },
{ {
"label": "Vector Store Retriever", "label": "Vector Store Retriever",
@ -158,14 +147,16 @@
"inputs": { "inputs": {
"model": "{{chatOllama_0.data.instance}}", "model": "{{chatOllama_0.data.instance}}",
"vectorStoreRetriever": "{{faiss_0.data.instance}}", "vectorStoreRetriever": "{{faiss_0.data.instance}}",
"memory": "" "memory": "",
"rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
}, },
"outputAnchors": [ "outputAnchors": [
{ {
"id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|BaseLangChain", "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable",
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"label": "ConversationalRetrievalQAChain", "label": "ConversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain | BaseChain | BaseLangChain" "type": "ConversationalRetrievalQAChain | BaseChain | Runnable"
} }
], ],
"outputs": {}, "outputs": {},
@ -649,9 +640,9 @@
"source": "chatOllama_0", "source": "chatOllama_0",
"sourceHandle": "chatOllama_0-output-chatOllama-ChatOllama|SimpleChatModel|BaseChatModel|BaseLanguageModel|Runnable", "sourceHandle": "chatOllama_0-output-chatOllama-ChatOllama|SimpleChatModel|BaseChatModel|BaseLanguageModel|Runnable",
"target": "conversationalRetrievalQAChain_0", "target": "conversationalRetrievalQAChain_0",
"targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"type": "buttonedge", "type": "buttonedge",
"id": "chatOllama_0-chatOllama_0-output-chatOllama-ChatOllama|SimpleChatModel|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "id": "chatOllama_0-chatOllama_0-output-chatOllama-ChatOllama|SimpleChatModel|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"data": { "data": {
"label": "" "label": ""
} }
View File
@ -13,10 +13,10 @@
"data": { "data": {
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"version": 1, "version": 2,
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "BaseLangChain"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
"category": "Chains", "category": "Chains",
"description": "Document QA - built on RetrievalQAChain to provide a chat history component", "description": "Document QA - built on RetrievalQAChain to provide a chat history component",
"inputParams": [ "inputParams": [
@ -28,47 +28,36 @@
"id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
}, },
{ {
"label": "System Message", "label": "Rephrase Prompt",
"name": "systemMessagePrompt", "name": "rephrasePrompt",
"type": "string", "type": "string",
"description": "Using previous chat history, rephrase question into a standalone question",
"warning": "Prompt must include input variables: {chat_history} and {question}",
"rows": 4, "rows": 4,
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.", "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string"
}, },
{ {
"label": "Chain Option", "label": "Response Prompt",
"name": "chainOption", "name": "responsePrompt",
"type": "options", "type": "string",
"options": [ "description": "Taking the rephrased question, search for answer from the provided context",
{ "warning": "Prompt must include input variable: {context}",
"label": "MapReduceDocumentsChain", "rows": 4,
"name": "map_reduce",
"description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time"
},
{
"label": "RefineDocumentsChain",
"name": "refine",
"description": "Suitable for QA tasks over a large number of documents."
},
{
"label": "StuffDocumentsChain",
"name": "stuff",
"description": "Suitable for QA tasks over a small number of documents."
}
],
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"id": "conversationalRetrievalQAChain_0-input-chainOption-options" "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.",
"id": "conversationalRetrievalQAChain_0-input-responsePrompt-string"
} }
], ],
"inputAnchors": [ "inputAnchors": [
{ {
"label": "Language Model", "label": "Chat Model",
"name": "model", "name": "model",
"type": "BaseLanguageModel", "type": "BaseChatModel",
"id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel"
}, },
{ {
"label": "Vector Store Retriever", "label": "Vector Store Retriever",
@ -89,14 +78,16 @@
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{qdrant_0.data.instance}}", "vectorStoreRetriever": "{{qdrant_0.data.instance}}",
"memory": "{{ZepMemory_0.data.instance}}", "memory": "{{ZepMemory_0.data.instance}}",
"returnSourceDocuments": true "returnSourceDocuments": true,
"rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
}, },
"outputAnchors": [ "outputAnchors": [
{ {
"id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|BaseLangChain", "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable",
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"label": "ConversationalRetrievalQAChain", "label": "ConversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain | BaseChain | BaseLangChain" "type": "ConversationalRetrievalQAChain | BaseChain | Runnable"
} }
], ],
"outputs": {}, "outputs": {},
@ -232,7 +223,7 @@
"label": "Session Id", "label": "Session Id",
"name": "sessionId", "name": "sessionId",
"type": "string", "type": "string",
"description": "if empty, chatId will be used automatically", "description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat\">more</a>",
"default": "", "default": "",
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
@ -709,9 +700,9 @@
"source": "chatOpenAI_0", "source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"target": "conversationalRetrievalQAChain_0", "target": "conversationalRetrievalQAChain_0",
"targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"type": "buttonedge", "type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"data": { "data": {
"label": "" "label": ""
} }
View File
@ -249,10 +249,10 @@
"data": { "data": {
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"version": 1, "version": 2,
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "BaseLangChain"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
"category": "Chains", "category": "Chains",
"description": "Document QA - built on RetrievalQAChain to provide a chat history component", "description": "Document QA - built on RetrievalQAChain to provide a chat history component",
"inputParams": [ "inputParams": [
@ -264,47 +264,36 @@
"id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
}, },
{ {
"label": "System Message", "label": "Rephrase Prompt",
"name": "systemMessagePrompt", "name": "rephrasePrompt",
"type": "string", "type": "string",
"description": "Using previous chat history, rephrase question into a standalone question",
"warning": "Prompt must include input variables: {chat_history} and {question}",
"rows": 4, "rows": 4,
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.", "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string"
}, },
{ {
"label": "Chain Option", "label": "Response Prompt",
"name": "chainOption", "name": "responsePrompt",
"type": "options", "type": "string",
"options": [ "description": "Taking the rephrased question, search for answer from the provided context",
{ "warning": "Prompt must include input variable: {context}",
"label": "MapReduceDocumentsChain", "rows": 4,
"name": "map_reduce",
"description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time"
},
{
"label": "RefineDocumentsChain",
"name": "refine",
"description": "Suitable for QA tasks over a large number of documents."
},
{
"label": "StuffDocumentsChain",
"name": "stuff",
"description": "Suitable for QA tasks over a small number of documents."
}
],
"additionalParams": true, "additionalParams": true,
"optional": true, "optional": true,
"id": "conversationalRetrievalQAChain_0-input-chainOption-options" "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.",
"id": "conversationalRetrievalQAChain_0-input-responsePrompt-string"
} }
], ],
"inputAnchors": [ "inputAnchors": [
{ {
"label": "Language Model", "label": "Chat Model",
"name": "model", "name": "model",
"type": "BaseLanguageModel", "type": "BaseChatModel",
"id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel"
}, },
{ {
"label": "Vector Store Retriever", "label": "Vector Store Retriever",
@ -323,14 +312,16 @@
], ],
"inputs": { "inputs": {
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{pinecone_0.data.instance}}" "vectorStoreRetriever": "{{pinecone_0.data.instance}}",
"rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
"responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
}, },
"outputAnchors": [ "outputAnchors": [
{ {
"id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|BaseLangChain", "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable",
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"label": "ConversationalRetrievalQAChain", "label": "ConversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain | BaseChain | BaseLangChain" "type": "ConversationalRetrievalQAChain | BaseChain | Runnable"
} }
], ],
"outputs": {}, "outputs": {},
@ -763,9 +754,9 @@
"source": "chatOpenAI_0", "source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"target": "conversationalRetrievalQAChain_0", "target": "conversationalRetrievalQAChain_0",
"targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"type": "buttonedge", "type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
"data": { "data": {
"label": "" "label": ""
} }
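The two prompt defaults above encode the chain's new two-step shape: the rephrase prompt condenses chat history plus the follow-up into a standalone question, and the response prompt answers it from retrieved context. A minimal sketch of that wiring with LangChain's runnable API (illustrative only, not Flowise's exact implementation; the retriever step is stubbed here):

import { ChatOpenAI } from 'langchain/chat_models/openai'
import { PromptTemplate } from 'langchain/prompts'
import { RunnableSequence } from 'langchain/schema/runnable'
import { StringOutputParser } from 'langchain/schema/output_parser'

const model = new ChatOpenAI({ temperature: 0 })

// Step 1: rephrasePrompt maps (chat_history, question) to a standalone question
const standaloneQuestion = RunnableSequence.from([
    PromptTemplate.fromTemplate(
        'Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:'
    ),
    model,
    new StringOutputParser()
])

// Step 2: responsePrompt answers the standalone question from {context};
// a real flow fills `context` from the vector store retriever instead of this stub
const answer = RunnableSequence.from([
    {
        context: async () => 'stubbed retriever output',
        question: (input: { question: string }) => input.question
    },
    PromptTemplate.fromTemplate(
        'Using the provided context, answer the question.\n------------\n{context}\n------------\nQuestion: {question}'
    ),
    model,
    new StringOutputParser()
])

const chain = standaloneQuestion.pipe((q: string) => ({ question: q })).pipe(answer)
// await chain.invoke({ chat_history: '', question: 'What does the document say about X?' })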

@@ -1567,7 +1567,7 @@
         "id": "conversationalAgent_0-input-tools-Tool"
       },
       {
-        "label": "Language Model",
+        "label": "Chat Model",
         "name": "model",
         "type": "BaseChatModel",
         "id": "conversationalAgent_0-input-model-BaseChatModel"

@@ -262,7 +262,7 @@
     ],
     "inputAnchors": [
       {
-        "label": "Language Model",
+        "label": "Chat Model",
         "name": "model",
         "type": "BaseChatModel",
         "id": "conversationChain_0-input-model-BaseChatModel"

@@ -190,7 +190,7 @@
     "data": {
       "id": "conversationalRetrievalQAChain_0",
       "label": "Conversational Retrieval QA Chain",
-      "version": 1,
+      "version": 2,
       "name": "conversationalRetrievalQAChain",
       "type": "ConversationalRetrievalQAChain",
       "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
@@ -205,47 +205,36 @@
         "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
       },
       {
-        "label": "System Message",
-        "name": "systemMessagePrompt",
+        "label": "Rephrase Prompt",
+        "name": "rephrasePrompt",
         "type": "string",
+        "description": "Using previous chat history, rephrase question into a standalone question",
+        "warning": "Prompt must include input variables: {chat_history} and {question}",
         "rows": 4,
         "additionalParams": true,
         "optional": true,
-        "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.",
-        "id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string"
+        "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
+        "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string"
       },
       {
-        "label": "Chain Option",
-        "name": "chainOption",
-        "type": "options",
-        "options": [
-          {
-            "label": "MapReduceDocumentsChain",
-            "name": "map_reduce",
-            "description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time"
-          },
-          {
-            "label": "RefineDocumentsChain",
-            "name": "refine",
-            "description": "Suitable for QA tasks over a large number of documents."
-          },
-          {
-            "label": "StuffDocumentsChain",
-            "name": "stuff",
-            "description": "Suitable for QA tasks over a small number of documents."
-          }
-        ],
+        "label": "Response Prompt",
+        "name": "responsePrompt",
+        "type": "string",
+        "description": "Taking the rephrased question, search for answer from the provided context",
+        "warning": "Prompt must include input variable: {context}",
+        "rows": 4,
         "additionalParams": true,
         "optional": true,
-        "id": "conversationalRetrievalQAChain_0-input-chainOption-options"
+        "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.",
+        "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string"
       }
     ],
     "inputAnchors": [
       {
-        "label": "Language Model",
+        "label": "Chat Model",
         "name": "model",
-        "type": "BaseLanguageModel",
-        "id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel"
+        "type": "BaseChatModel",
+        "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel"
       },
       {
         "label": "Vector Store Retriever",
@@ -267,8 +256,8 @@
       "vectorStoreRetriever": "{{vectara_0.data.instance}}",
       "memory": "",
       "returnSourceDocuments": true,
-      "systemMessagePrompt": "",
-      "chainOption": ""
+      "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
+      "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
     },
     "outputAnchors": [
       {
@@ -427,9 +416,9 @@
     "source": "chatOpenAI_0",
     "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
     "target": "conversationalRetrievalQAChain_0",
-    "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel",
+    "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
     "type": "buttonedge",
-    "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel",
+    "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
     "data": {
       "label": ""
     }

@@ -578,7 +578,7 @@
         "id": "conversationalAgent_0-input-tools-Tool"
       },
       {
-        "label": "Language Model",
+        "label": "Chat Model",
         "name": "model",
         "type": "BaseChatModel",
         "id": "conversationalAgent_0-input-model-BaseChatModel"

@@ -162,10 +162,10 @@
     "data": {
       "id": "conversationalRetrievalQAChain_0",
       "label": "Conversational Retrieval QA Chain",
-      "version": 1,
+      "version": 2,
       "name": "conversationalRetrievalQAChain",
       "type": "ConversationalRetrievalQAChain",
-      "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain"],
+      "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
       "category": "Chains",
       "description": "Document QA - built on RetrievalQAChain to provide a chat history component",
       "inputParams": [
@@ -177,47 +177,36 @@
         "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
       },
       {
-        "label": "System Message",
-        "name": "systemMessagePrompt",
+        "label": "Rephrase Prompt",
+        "name": "rephrasePrompt",
         "type": "string",
+        "description": "Using previous chat history, rephrase question into a standalone question",
+        "warning": "Prompt must include input variables: {chat_history} and {question}",
         "rows": 4,
         "additionalParams": true,
         "optional": true,
-        "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.",
-        "id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string"
+        "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
+        "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string"
       },
       {
-        "label": "Chain Option",
-        "name": "chainOption",
-        "type": "options",
-        "options": [
-          {
-            "label": "MapReduceDocumentsChain",
-            "name": "map_reduce",
-            "description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time"
-          },
-          {
-            "label": "RefineDocumentsChain",
-            "name": "refine",
-            "description": "Suitable for QA tasks over a large number of documents."
-          },
-          {
-            "label": "StuffDocumentsChain",
-            "name": "stuff",
-            "description": "Suitable for QA tasks over a small number of documents."
-          }
-        ],
+        "label": "Response Prompt",
+        "name": "responsePrompt",
+        "type": "string",
+        "description": "Taking the rephrased question, search for answer from the provided context",
+        "warning": "Prompt must include input variable: {context}",
+        "rows": 4,
         "additionalParams": true,
         "optional": true,
-        "id": "conversationalRetrievalQAChain_0-input-chainOption-options"
+        "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.",
+        "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string"
       }
     ],
     "inputAnchors": [
       {
-        "label": "Language Model",
+        "label": "Chat Model",
         "name": "model",
-        "type": "BaseLanguageModel",
-        "id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel"
+        "type": "BaseChatModel",
+        "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel"
       },
       {
         "label": "Vector Store Retriever",
@@ -239,15 +228,15 @@
       "vectorStoreRetriever": "{{pinecone_0.data.instance}}",
       "memory": "{{RedisBackedChatMemory_0.data.instance}}",
       "returnSourceDocuments": true,
-      "systemMessagePrompt": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given context. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Do not make up any information that is not in the context. Refuse to answer any question not about the info. Never break character.",
-      "chainOption": ""
+      "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
+      "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
     },
     "outputAnchors": [
       {
-        "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain",
+        "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable",
         "name": "conversationalRetrievalQAChain",
         "label": "ConversationalRetrievalQAChain",
-        "type": "ConversationalRetrievalQAChain | BaseChain"
+        "type": "ConversationalRetrievalQAChain | BaseChain | Runnable"
       }
     ],
     "outputs": {},
@@ -589,7 +578,7 @@
     "label": "Session Id",
     "name": "sessionId",
     "type": "string",
-    "description": "If not specified, the first CHAT_MESSAGE_ID will be used as sessionId",
+    "description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat\">more</a>",
     "default": "",
     "additionalParams": true,
     "optional": true,
@@ -772,9 +761,9 @@
     "source": "chatOpenAI_0",
     "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
     "target": "conversationalRetrievalQAChain_0",
-    "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel",
+    "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
    "type": "buttonedge",
-    "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel",
+    "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
     "data": {
       "label": ""
     }

@@ -20,7 +20,6 @@ import {
     ICredentialReturnResponse,
     chatType,
     IChatMessage,
-    IReactFlowEdge,
     IDepthQueue,
     INodeDirectedGraph
 } from './Interface'
@@ -39,14 +38,14 @@ import {
     databaseEntities,
     transformToCredentialEntity,
     decryptCredentialData,
-    clearAllSessionMemory,
     replaceInputsWithConfig,
     getEncryptionKey,
-    checkMemorySessionId,
-    clearSessionMemoryFromViewMessageDialog,
+    getMemorySessionId,
     getUserHome,
-    replaceChatHistory,
-    getAllConnectedNodes
+    getSessionChatHistory,
+    getAllConnectedNodes,
+    clearSessionMemory,
+    findMemoryNode
 } from './utils'
 import { cloneDeep, omit, uniqWith, isEqual } from 'lodash'
 import { getDataSource } from './DataSource'
@@ -533,17 +532,18 @@ export class App {
             const parsedFlowData: IReactFlowObject = JSON.parse(flowData)
             const nodes = parsedFlowData.nodes
-            if (isClearFromViewMessageDialog) {
-                await clearSessionMemoryFromViewMessageDialog(
+            try {
+                await clearSessionMemory(
                     nodes,
                     this.nodesPool.componentNodes,
                     chatId,
                     this.AppDataSource,
                     sessionId,
-                    memoryType
+                    memoryType,
+                    isClearFromViewMessageDialog
                 )
-            } else {
-                await clearAllSessionMemory(nodes, this.nodesPool.componentNodes, chatId, this.AppDataSource, sessionId)
+            } catch (e) {
+                return res.status(500).send('Error clearing chat messages')
             }
             const deleteOptions: FindOptionsWhere<ChatMessage> = { chatflowid, chatId }
@@ -1398,26 +1398,6 @@
         return await this.AppDataSource.getRepository(ChatMessage).save(chatmessage)
     }
-    /**
-     * Method that find memory label that is connected within chatflow
-     * In a chatflow, there should only be 1 memory node
-     * @param {IReactFlowNode[]} nodes
-     * @param {IReactFlowEdge[]} edges
-     * @returns {string | undefined}
-     */
-    findMemoryLabel(nodes: IReactFlowNode[], edges: IReactFlowEdge[]): IReactFlowNode | undefined {
-        const memoryNodes = nodes.filter((node) => node.data.category === 'Memory')
-        const memoryNodeIds = memoryNodes.map((mem) => mem.data.id)
-        for (const edge of edges) {
-            if (memoryNodeIds.includes(edge.source)) {
-                const memoryNode = nodes.find((node) => node.data.id === edge.source)
-                return memoryNode
-            }
-        }
-        return undefined
-    }
     async upsertVector(req: Request, res: Response, isInternal: boolean = false) {
         try {
             const chatflowid = req.params.id
@@ -1586,7 +1566,6 @@
          * - Still in sync (i.e the flow has not been modified since)
          * - Existing overrideConfig and new overrideConfig are the same
          * - Flow doesn't start with/contain nodes that depend on incomingInput.question
-         * - Its not an Upsert request
          * TODO: convert overrideConfig to hash when we no longer store base64 string but filepath
          ***/
         const isFlowReusable = () => {
@@ -1640,22 +1619,28 @@
                 isStreamValid = isFlowValidForStream(nodes, endingNodeData)
             }
-            let chatHistory: IMessage[] | string = incomingInput.history
-            // When {{chat_history}} is used in Prompt Template, fetch the chat conversations from memory
+            let chatHistory: IMessage[] = incomingInput.history ?? []
+            // When {{chat_history}} is used in Prompt Template, fetch the chat conversations from memory node
             for (const endingNode of endingNodes) {
                 const endingNodeData = endingNode.data
                 if (!endingNodeData.inputs?.memory) continue
-                if (
-                    endingNodeData.inputs?.memory &&
-                    !incomingInput.history &&
-                    (incomingInput.chatId || incomingInput.overrideConfig?.sessionId)
-                ) {
-                    const memoryNodeId = endingNodeData.inputs?.memory.split('.')[0].replace('{{', '')
-                    const memoryNode = nodes.find((node) => node.data.id === memoryNodeId)
-                    if (memoryNode) {
-                        chatHistory = await replaceChatHistory(memoryNode, incomingInput, this.AppDataSource, databaseEntities, logger)
-                    }
+
+                const memoryNodeId = endingNodeData.inputs?.memory.split('.')[0].replace('{{', '')
+                const memoryNode = nodes.find((node) => node.data.id === memoryNodeId)
+
+                if (!memoryNode) continue
+
+                if (!chatHistory.length && (incomingInput.chatId || incomingInput.overrideConfig?.sessionId)) {
+                    chatHistory = await getSessionChatHistory(
+                        memoryNode,
+                        this.nodesPool.componentNodes,
+                        incomingInput,
+                        this.AppDataSource,
+                        databaseEntities,
+                        logger
+                    )
                 }
             }
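As an aside, the memory node lookup above is plain string surgery on the mustache-style reference stored in the ending node's inputs, for example (value illustrative):

// inputs.memory holds a node reference such as '{{RedisBackedChatMemory_0.data.instance}}'
const memoryRef = '{{RedisBackedChatMemory_0.data.instance}}'
const memoryNodeId = memoryRef.split('.')[0].replace('{{', '') // => 'RedisBackedChatMemory_0'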
@@ -1714,16 +1699,11 @@
             logger.debug(`[server]: Running ${nodeToExecuteData.label} (${nodeToExecuteData.id})`)
-            let sessionId = undefined
-            if (nodeToExecuteData.instance) sessionId = checkMemorySessionId(nodeToExecuteData.instance, chatId)
-            const memoryNode = this.findMemoryLabel(nodes, edges)
+            const memoryNode = findMemoryNode(nodes, edges)
             const memoryType = memoryNode?.data.label
-            let chatHistory: IMessage[] | string = incomingInput.history
-            if (memoryNode && !incomingInput.history && (incomingInput.chatId || incomingInput.overrideConfig?.sessionId)) {
-                chatHistory = await replaceChatHistory(memoryNode, incomingInput, this.AppDataSource, databaseEntities, logger)
-            }
+            let sessionId = undefined
+            if (memoryNode) sessionId = getMemorySessionId(memoryNode, incomingInput, chatId, isInternal)
             const nodeInstanceFilePath = this.nodesPool.componentNodes[nodeToExecuteData.name].filePath as string
             const nodeModule = await import(nodeInstanceFilePath)
@@ -1731,24 +1711,24 @@
             let result = isStreamValid
                 ? await nodeInstance.run(nodeToExecuteData, incomingInput.question, {
+                      chatId,
                       chatflowid,
-                      chatHistory,
-                      socketIO,
-                      socketIOClientId: incomingInput.socketIOClientId,
+                      chatHistory: incomingInput.history,
                       logger,
                       appDataSource: this.AppDataSource,
                       databaseEntities,
                       analytic: chatflow.analytic,
-                      chatId
+                      socketIO,
+                      socketIOClientId: incomingInput.socketIOClientId
                   })
                 : await nodeInstance.run(nodeToExecuteData, incomingInput.question, {
+                      chatId,
                       chatflowid,
-                      chatHistory,
+                      chatHistory: incomingInput.history,
                       logger,
                       appDataSource: this.AppDataSource,
                       databaseEntities,
-                      analytic: chatflow.analytic,
-                      chatId
+                      analytic: chatflow.analytic
                   })
             result = typeof result === 'string' ? { text: result } : result

@@ -26,7 +26,8 @@ import {
     getEncryptionKeyPath,
     ICommonObject,
     IDatabaseEntity,
-    IMessage
+    IMessage,
+    FlowiseMemory
 } from 'flowise-components'
 import { randomBytes } from 'crypto'
 import { AES, enc } from 'crypto-js'
@@ -270,7 +271,7 @@ export const buildLangchain = async (
     depthQueue: IDepthQueue,
     componentNodes: IComponentNodes,
     question: string,
-    chatHistory: IMessage[] | string,
+    chatHistory: IMessage[],
     chatId: string,
     chatflowid: string,
     appDataSource: DataSource,
@@ -317,9 +318,10 @@
                 await newNodeInstance.vectorStoreMethods!['upsert']!.call(newNodeInstance, reactFlowNodeData, {
                     chatId,
                     chatflowid,
+                    chatHistory,
+                    logger,
                     appDataSource,
                     databaseEntities,
-                    logger,
                     cachePool,
                     dynamicVariables
                 })
@@ -330,9 +332,10 @@
                 let outputResult = await newNodeInstance.init(reactFlowNodeData, question, {
                     chatId,
                     chatflowid,
+                    chatHistory,
+                    logger,
                     appDataSource,
                     databaseEntities,
-                    logger,
                     cachePool,
                     dynamicVariables
                 })
@@ -424,66 +427,52 @@
 }

 /**
- * Clear all session memories on the canvas
- * @param {IReactFlowNode[]} reactFlowNodes
- * @param {IComponentNodes} componentNodes
- * @param {string} chatId
- * @param {DataSource} appDataSource
- * @param {string} sessionId
- */
-export const clearAllSessionMemory = async (
-    reactFlowNodes: IReactFlowNode[],
-    componentNodes: IComponentNodes,
-    chatId: string,
-    appDataSource: DataSource,
-    sessionId?: string
-) => {
-    for (const node of reactFlowNodes) {
-        if (node.data.category !== 'Memory' && node.data.type !== 'OpenAIAssistant') continue
-        const nodeInstanceFilePath = componentNodes[node.data.name].filePath as string
-        const nodeModule = await import(nodeInstanceFilePath)
-        const newNodeInstance = new nodeModule.nodeClass()
-        if (sessionId && node.data.inputs) {
-            node.data.inputs.sessionId = sessionId
-        }
-        if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.clearSessionMemory) {
-            await newNodeInstance.memoryMethods.clearSessionMemory(node.data, { chatId, appDataSource, databaseEntities, logger })
-        }
-    }
-}
-
-/**
- * Clear specific session memory from View Message Dialog UI
+ * Clear session memories
  * @param {IReactFlowNode[]} reactFlowNodes
  * @param {IComponentNodes} componentNodes
  * @param {string} chatId
  * @param {DataSource} appDataSource
  * @param {string} sessionId
  * @param {string} memoryType
+ * @param {string} isClearFromViewMessageDialog
  */
-export const clearSessionMemoryFromViewMessageDialog = async (
+export const clearSessionMemory = async (
     reactFlowNodes: IReactFlowNode[],
     componentNodes: IComponentNodes,
     chatId: string,
     appDataSource: DataSource,
     sessionId?: string,
-    memoryType?: string
+    memoryType?: string,
+    isClearFromViewMessageDialog?: string
 ) => {
-    if (!sessionId) return
     for (const node of reactFlowNodes) {
         if (node.data.category !== 'Memory' && node.data.type !== 'OpenAIAssistant') continue
-        if (memoryType && node.data.label !== memoryType) continue
+
+        // Only clear specific session memory from View Message Dialog UI
+        if (isClearFromViewMessageDialog && memoryType && node.data.label !== memoryType) continue
+
         const nodeInstanceFilePath = componentNodes[node.data.name].filePath as string
         const nodeModule = await import(nodeInstanceFilePath)
         const newNodeInstance = new nodeModule.nodeClass()
+        const options: ICommonObject = { chatId, appDataSource, databaseEntities, logger }

-        if (sessionId && node.data.inputs) node.data.inputs.sessionId = sessionId
-        if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.clearSessionMemory) {
-            await newNodeInstance.memoryMethods.clearSessionMemory(node.data, { chatId, appDataSource, databaseEntities, logger })
-            return
+        // SessionId always takes priority because it is the sessionId used for a 3rd party memory node
+        if (sessionId && node.data.inputs) {
+            if (node.data.type === 'OpenAIAssistant') {
+                await newNodeInstance.clearChatMessages(node.data, options, { type: 'threadId', id: sessionId })
+            } else {
+                node.data.inputs.sessionId = sessionId
+                const initializedInstance: FlowiseMemory = await newNodeInstance.init(node.data, '', options)
+                await initializedInstance.clearChatMessages(sessionId)
+            }
+        } else if (chatId && node.data.inputs) {
+            if (node.data.type === 'OpenAIAssistant') {
+                await newNodeInstance.clearChatMessages(node.data, options, { type: 'chatId', id: chatId })
+            } else {
+                node.data.inputs.sessionId = chatId
+                const initializedInstance: FlowiseMemory = await newNodeInstance.init(node.data, '', options)
+                await initializedInstance.clearChatMessages(chatId)
+            }
         }
     }
 }
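Two hypothetical call sites, inside an async handler, to make the branch priority concrete (all argument values illustrative):

// sessionId supplied: clear that session; for an OpenAIAssistant node the sessionId maps to a threadId
await clearSessionMemory(nodes, componentNodes, 'chat-123', dataSource, 'session-abc', 'Redis-Backed Chat Memory', 'true')

// no sessionId supplied: fall back to clearing under the chatId
await clearSessionMemory(nodes, componentNodes, 'chat-123', dataSource)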
@@ -500,7 +489,7 @@ export const getVariableValue = (
     paramValue: string,
     reactFlowNodes: IReactFlowNode[],
     question: string,
-    chatHistory: IMessage[] | string,
+    chatHistory: IMessage[],
     isAcceptVariable = false
 ) => {
     let returnVal = paramValue
@@ -533,10 +522,7 @@
         }

         if (isAcceptVariable && variableFullPath === CHAT_HISTORY_VAR_PREFIX) {
-            variableDict[`{{${variableFullPath}}}`] = handleEscapeCharacters(
-                typeof chatHistory === 'string' ? chatHistory : convertChatHistoryToText(chatHistory),
-                false
-            )
+            variableDict[`{{${variableFullPath}}}`] = handleEscapeCharacters(convertChatHistoryToText(chatHistory), false)
         }

         // Split by first occurrence of '.' to get just nodeId
@@ -583,7 +569,7 @@ export const resolveVariables = (
     reactFlowNodeData: INodeData,
     reactFlowNodes: IReactFlowNode[],
     question: string,
-    chatHistory: IMessage[] | string
+    chatHistory: IMessage[]
 ): INodeData => {
     let flowNodeData = cloneDeep(reactFlowNodeData)
     const types = 'inputs'
@@ -970,21 +956,43 @@ export const redactCredentialWithPasswordType = (
 }

 /**
- * Replace sessionId with new chatId
- * Ex: after clear chat history, use the new chatId as sessionId
- * @param {any} instance
+ * Get sessionId
+ * Hierarchy of sessionId (top down):
+ * API/Embed:
+ * (1) Provided in API body - incomingInput.overrideConfig: { sessionId: 'abc' }
+ * (2) Provided in API body - incomingInput.chatId
+ *
+ * API/Embed + UI:
+ * (3) Hard-coded sessionId in UI
+ * (4) Not specified on UI nor API, default to chatId
+ * @param {IReactFlowNode} memoryNode
+ * @param {IncomingInput} incomingInput
  * @param {string} chatId
+ * @param {boolean} isInternal
  */
-export const checkMemorySessionId = (instance: any, chatId: string): string | undefined => {
-    if (instance.memory && instance.memory.isSessionIdUsingChatMessageId && chatId) {
-        instance.memory.sessionId = chatId
-        instance.memory.chatHistory.sessionId = chatId
-    }
-    if (instance.memory && instance.memory.sessionId) return instance.memory.sessionId
-    else if (instance.memory && instance.memory.chatHistory && instance.memory.chatHistory.sessionId)
-        return instance.memory.chatHistory.sessionId
-    return undefined
+export const getMemorySessionId = (
+    memoryNode: IReactFlowNode,
+    incomingInput: IncomingInput,
+    chatId: string,
+    isInternal: boolean
+): string | undefined => {
+    if (!isInternal) {
+        // Provided in API body - incomingInput.overrideConfig: { sessionId: 'abc' }
+        if (incomingInput.overrideConfig?.sessionId) {
+            return incomingInput.overrideConfig?.sessionId
+        }
+        // Provided in API body - incomingInput.chatId
+        if (incomingInput.chatId) {
+            return incomingInput.chatId
+        }
+    }
+    // Hard-coded sessionId in UI
+    if (memoryNode.data.inputs?.sessionId) {
+        return memoryNode.data.inputs.sessionId
+    }
+    // Default to chatId
+    return chatId
 }
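To make the hierarchy concrete, two hypothetical lookups (values illustrative):

// (1) external API call with an override: returns 'abc' regardless of the other sources
getMemorySessionId(memoryNode, { question: 'hi', overrideConfig: { sessionId: 'abc' } } as IncomingInput, 'chat-123', false)

// (4) internal UI call with nothing specified anywhere: falls back to the chatId, 'chat-123'
getMemorySessionId(memoryNode, { question: 'hi' } as IncomingInput, 'chat-123', true)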
 /**
@@ -996,31 +1004,52 @@ export const checkMemorySessionId = (instance: any, chatId: string): string | un
  * @param {any} logger
  * @returns {string}
  */
-export const replaceChatHistory = async (
+export const getSessionChatHistory = async (
     memoryNode: IReactFlowNode,
+    componentNodes: IComponentNodes,
     incomingInput: IncomingInput,
     appDataSource: DataSource,
     databaseEntities: IDatabaseEntity,
     logger: any
-): Promise<string> => {
-    const nodeInstanceFilePath = memoryNode.data.filePath as string
+): Promise<IMessage[]> => {
+    const nodeInstanceFilePath = componentNodes[memoryNode.data.name].filePath as string
     const nodeModule = await import(nodeInstanceFilePath)
     const newNodeInstance = new nodeModule.nodeClass()

+    // Replace memory's sessionId/chatId
     if (incomingInput.overrideConfig?.sessionId && memoryNode.data.inputs) {
         memoryNode.data.inputs.sessionId = incomingInput.overrideConfig.sessionId
+    } else if (incomingInput.chatId && memoryNode.data.inputs) {
+        memoryNode.data.inputs.sessionId = incomingInput.chatId
     }

-    if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.getChatMessages) {
-        return await newNodeInstance.memoryMethods.getChatMessages(memoryNode.data, {
-            chatId: incomingInput.chatId,
-            appDataSource,
-            databaseEntities,
-            logger
-        })
-    }
-    return ''
+    const initializedInstance: FlowiseMemory = await newNodeInstance.init(memoryNode.data, '', {
+        appDataSource,
+        databaseEntities,
+        logger
+    })

+    return (await initializedInstance.getChatMessages()) as IMessage[]
 }

+/**
+ * Method that finds the memory node that is connected within the chatflow
+ * In a chatflow, there should only be 1 memory node
+ * @param {IReactFlowNode[]} nodes
+ * @param {IReactFlowEdge[]} edges
+ * @returns {IReactFlowNode | undefined}
+ */
+export const findMemoryNode = (nodes: IReactFlowNode[], edges: IReactFlowEdge[]): IReactFlowNode | undefined => {
+    const memoryNodes = nodes.filter((node) => node.data.category === 'Memory')
+    const memoryNodeIds = memoryNodes.map((mem) => mem.data.id)
+    for (const edge of edges) {
+        if (memoryNodeIds.includes(edge.source)) {
+            const memoryNode = nodes.find((node) => node.data.id === edge.source)
+            return memoryNode
+        }
+    }
+    return undefined
+}
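Usage mirrors the App method removed above; the server now calls the shared helper when resolving memory for a prediction:

// find the flow's single memory node (if any) and derive its type label
const memoryNode = findMemoryNode(nodes, edges)
const memoryType = memoryNode?.data.label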
 /**

@@ -280,6 +280,7 @@ const NodeInputHandler = ({ inputAnchor, inputParam, data, disabled = false, isA
                     style={{
                         display: 'flex',
                         flexDirection: 'row',
+                        alignItems: 'center',
                         borderRadius: 10,
                         background: 'rgb(254,252,191)',
                         padding: 10,
@@ -287,7 +288,7 @@
                         marginBottom: 10
                     }}
                 >
-                    <IconAlertTriangle size={36} color='orange' />
+                    <IconAlertTriangle size={30} color='orange' />
                     <span style={{ color: 'rgb(116,66,16)', marginLeft: 10 }}>{inputParam.warning}</span>
                 </div>
             )}