From 40f8371de955f3401f2263382de0c1c4505a9473 Mon Sep 17 00:00:00 2001 From: Henry Date: Mon, 4 Dec 2023 20:04:09 +0000 Subject: [PATCH 01/24] add llamaindex --- .../ConversationalAgent.ts | 19 +- .../ConversationalRetrievalAgent.ts | 24 +- .../agents/OpenAIAssistant/OpenAIAssistant.ts | 100 +- .../OpenAIFunctionAgent.ts | 24 +- .../ConversationChain/ConversationChain.ts | 15 +- .../ConversationalRetrievalQAChain.ts | 42 +- .../AzureChatOpenAI_LlamaIndex.ts | 135 +++ .../chatmodels/ChatAnthropic/ChatAnthropic.ts | 63 +- .../ChatAnthropic/ChatAnthropic_LlamaIndex.ts | 94 ++ .../nodes/chatmodels/ChatAnthropic/utils.ts | 61 ++ .../ChatOpenAI/ChatOpenAI_LlamaIndex.ts | 148 +++ .../AzureOpenAIEmbedding_LlamaIndex.ts | 77 ++ .../OpenAIEmbedding_LlamaIndex.ts | 68 ++ .../engine/ChatEngine/ContextChatEngine.ts | 178 ++++ .../engine/ChatEngine/SimpleChatEngine.ts | 171 ++++ .../nodes/engine/ChatEngine/chat-engine.png | Bin 0 -> 10012 bytes .../engine/ChatEngine/context-chat-engine.png | Bin 0 -> 9768 bytes .../nodes/engine/QueryEngine/QueryEngine.ts | 126 +++ .../nodes/engine/QueryEngine/query-engine.png | Bin 0 -> 12383 bytes .../nodes/memory/BufferMemory/BufferMemory.ts | 47 +- .../BufferWindowMemory/BufferWindowMemory.ts | 45 +- .../ConversationSummaryMemory.ts | 49 +- .../nodes/memory/DynamoDb/DynamoDb.ts | 219 ++++- .../memory/MongoDBMemory/MongoDBMemory.ts | 115 ++- .../memory/MotorheadMemory/MotorheadMemory.ts | 86 +- .../RedisBackedChatMemory.ts | 98 +- .../UpstashRedisBackedChatMemory.ts | 102 ++- .../nodes/memory/ZepMemory/ZepMemory.ts | 148 ++- .../CompactRefine/CompactRefine.ts | 75 ++ .../CompactRefine/compactrefine.svg | 1 + .../responsesynthesizer/Refine/Refine.ts | 75 ++ .../responsesynthesizer/Refine/refine.svg | 1 + .../SimpleResponseBuilder.ts | 35 + .../SimpleResponseBuilder/simplerb.svg | 1 + .../TreeSummarize/TreeSummarize.ts | 56 ++ .../TreeSummarize/treesummarize.svg | 1 + .../nodes/responsesynthesizer/base.ts | 11 + .../Pinecone/Pinecone_LlamaIndex.ts | 366 ++++++++ .../vectorstores/SimpleStore/SimpleStore.ts | 124 +++ .../vectorstores/SimpleStore/simplevs.svg | 6 + packages/components/package.json | 3 +- packages/components/src/Interface.ts | 5 +- packages/components/src/utils.ts | 53 +- .../chatflows/Context Chat Engine.json | 855 ++++++++++++++++++ .../chatflows/Long Term Memory.json | 20 +- .../marketplaces/chatflows/Query Engine.json | 509 +++++++++++ .../chatflows/Simple Chat Engine.json | 270 ++++++ .../marketplaces/chatflows/WebPage QnA.json | 2 +- packages/server/src/index.ts | 68 +- packages/server/src/utils/index.ts | 98 +- packages/ui/src/assets/images/llamaindex.png | Bin 0 -> 28343 bytes .../src/ui-component/dialog/NodeInfoDialog.js | 29 + packages/ui/src/utils/genericHelper.js | 1 + packages/ui/src/views/canvas/AddNodes.js | 89 +- packages/ui/src/views/canvas/CanvasNode.js | 19 +- .../marketplaces/MarketplaceCanvasNode.js | 18 + 56 files changed, 4509 insertions(+), 536 deletions(-) create mode 100644 packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts create mode 100644 packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts create mode 100644 packages/components/nodes/chatmodels/ChatAnthropic/utils.ts create mode 100644 packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts create mode 100644 packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts create mode 100644 packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts 
create mode 100644 packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts create mode 100644 packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts create mode 100644 packages/components/nodes/engine/ChatEngine/chat-engine.png create mode 100644 packages/components/nodes/engine/ChatEngine/context-chat-engine.png create mode 100644 packages/components/nodes/engine/QueryEngine/QueryEngine.ts create mode 100644 packages/components/nodes/engine/QueryEngine/query-engine.png create mode 100644 packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts create mode 100644 packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg create mode 100644 packages/components/nodes/responsesynthesizer/Refine/Refine.ts create mode 100644 packages/components/nodes/responsesynthesizer/Refine/refine.svg create mode 100644 packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts create mode 100644 packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg create mode 100644 packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts create mode 100644 packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg create mode 100644 packages/components/nodes/responsesynthesizer/base.ts create mode 100644 packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts create mode 100644 packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts create mode 100644 packages/components/nodes/vectorstores/SimpleStore/simplevs.svg create mode 100644 packages/server/marketplaces/chatflows/Context Chat Engine.json create mode 100644 packages/server/marketplaces/chatflows/Query Engine.json create mode 100644 packages/server/marketplaces/chatflows/Simple Chat Engine.json create mode 100644 packages/ui/src/assets/images/llamaindex.png diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index 8a2329b58..cb5d31897 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -2,7 +2,7 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Inter import { initializeAgentExecutorWithOptions, AgentExecutor, InitializeAgentExecutorOptions } from 'langchain/agents' import { Tool } from 'langchain/tools' import { BaseChatMemory } from 'langchain/memory' -import { getBaseClasses, mapChatHistory } from '../../../src/utils' +import { getBaseClasses } from '../../../src/utils' import { BaseChatModel } from 'langchain/chat_models/base' import { flatten } from 'lodash' import { additionalCallbacks } from '../../../src/handler' @@ -90,18 +90,17 @@ class ConversationalAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const executor = nodeData.instance as AgentExecutor - const memory = nodeData.inputs?.memory as BaseChatMemory + const memory = nodeData.inputs?.memory + memory.returnMessages = true // Return true for BaseChatModel - if (options && options.chatHistory) { - const chatHistoryClassName = memory.chatHistory.constructor.name - // Only replace when its In-Memory - if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') { - memory.chatHistory = mapChatHistory(options) - executor.memory = memory - } + /* When incomingInput.history is provided, only 
force-replace chatHistory if it's ShortTermMemory
+ * LongTermMemory will automatically retrieve chatHistory from sessionId
+ */
+ if (options && options.chatHistory && memory.isShortTermMemory) {
+ await memory.resumeMessages(options.chatHistory)
}
- ;(executor.memory as any).returnMessages = true // Return true for BaseChatModel
+ executor.memory = memory

const callbacks = await additionalCallbacks(nodeData, options)

diff --git a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
index 643c6a658..ce5b5e185 100644
--- a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
+++ b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
@@ -1,6 +1,6 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
+import { getBaseClasses } from '../../../src/utils'
import { flatten } from 'lodash'
import { BaseChatMemory } from 'langchain/memory'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
@@ -58,8 +58,8 @@ class ConversationalRetrievalAgent_Agents implements INode {
async init(nodeData: INodeData): Promise<any> {
const model = nodeData.inputs?.model
- const memory = nodeData.inputs?.memory as BaseChatMemory
const systemMessage = nodeData.inputs?.systemMessage as string
+ const memory = nodeData.inputs?.memory as BaseChatMemory
let tools = nodeData.inputs?.tools
tools = flatten(tools)
@@ -78,19 +78,21 @@ class ConversationalRetrievalAgent_Agents implements INode {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const executor = nodeData.instance as AgentExecutor
+ const memory = nodeData.inputs?.memory
- if (executor.memory) {
- ;(executor.memory as any).memoryKey = 'chat_history'
- ;(executor.memory as any).outputKey = 'output'
- ;(executor.memory as any).returnMessages = true
- const chatHistoryClassName = (executor.memory as any).chatHistory.constructor.name
- // Only replace when it's In-Memory
- if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
- ;(executor.memory as any).chatHistory = mapChatHistory(options)
- }
+ memory.memoryKey = 'chat_history'
+ memory.outputKey = 'output'
+ memory.returnMessages = true
+ /* When incomingInput.history is provided, only force-replace chatHistory if it's ShortTermMemory
+ * LongTermMemory will automatically retrieve chatHistory from sessionId
+ */
+ if (options && options.chatHistory && memory.isShortTermMemory) {
+ await memory.resumeMessages(options.chatHistory)
}
+ executor.memory = memory
+
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)

diff --git a/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts b/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts
index 7f2377bde..1a48be508 100644
--- a/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts
+++ b/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts
@@ -81,50 +81,8 @@ class OpenAIAssistant_Agents implements INode {
}
}
- async init(): Promise<any> {
- return null
- }
-
- //@ts-ignore
- memoryMethods = {
- async clearSessionMemory(nodeData: INodeData, options:
ICommonObject): Promise<void> {
- const selectedAssistantId = nodeData.inputs?.selectedAssistant as string
- const appDataSource = options.appDataSource as DataSource
- const databaseEntities = options.databaseEntities as IDatabaseEntity
- let sessionId = nodeData.inputs?.sessionId as string
-
- const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
- id: selectedAssistantId
- })
-
- if (!assistant) {
- options.logger.error(`Assistant ${selectedAssistantId} not found`)
- return
- }
-
- if (!sessionId && options.chatId) {
- const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({
- chatId: options.chatId
- })
- if (!chatmsg) {
- options.logger.error(`Chat Message with Chat Id: ${options.chatId} not found`)
- return
- }
- sessionId = chatmsg.sessionId
- }
-
- const credentialData = await getCredentialData(assistant.credential ?? '', options)
- const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
- if (!openAIApiKey) {
- options.logger.error(`OpenAI ApiKey not found`)
- return
- }
-
- const openai = new OpenAI({ apiKey: openAIApiKey })
- options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
- if (sessionId) await openai.beta.threads.del(sessionId)
- options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
- }
+ async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+ return new OpenAIAssistant({ nodeData, options })
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
@@ -459,4 +417,58 @@ const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.AssistantCreatePara
}
}
+interface OpenAIAssistantInput {
+ nodeData: INodeData
+ options: ICommonObject
+}
+
+class OpenAIAssistant {
+ nodeData: INodeData
+ options: ICommonObject = {}
+
+ constructor(fields: OpenAIAssistantInput) {
+ this.nodeData = fields.nodeData
+ this.options = fields.options
+ }
+
+ async clearChatMessages(): Promise<void> {
+ const selectedAssistantId = this.nodeData.inputs?.selectedAssistant as string
+ const appDataSource = this.options.appDataSource as DataSource
+ const databaseEntities = this.options.databaseEntities as IDatabaseEntity
+ let sessionId = this.nodeData.inputs?.sessionId as string
+
+ const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
+ id: selectedAssistantId
+ })
+
+ if (!assistant) {
+ this.options.logger.error(`Assistant ${selectedAssistantId} not found`)
+ return
+ }
+
+ if (!sessionId && this.options.chatId) {
+ const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({
+ chatId: this.options.chatId
+ })
+ if (!chatmsg) {
+ this.options.logger.error(`Chat Message with Chat Id: ${this.options.chatId} not found`)
+ return
+ }
+ sessionId = chatmsg.sessionId
+ }
+
+ const credentialData = await getCredentialData(assistant.credential ?? '', this.options)
+ const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, this.nodeData)
+ if (!openAIApiKey) {
+ this.options.logger.error(`OpenAI ApiKey not found`)
+ return
+ }
+
+ const openai = new OpenAI({ apiKey: openAIApiKey })
+ this.options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
+ if (sessionId) await openai.beta.threads.del(sessionId)
+ this.options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
+ }
+}
+
module.exports = { nodeClass: OpenAIAssistant_Agents }
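With this refactor, clearing an assistant thread moves from the removed memoryMethods.clearSessionMemory hook to a clearChatMessages method on the wrapper object that init now returns. A minimal sketch of how a caller could drive the new shape; the nodeData/options wiring here is assumed for illustration and is not part of the patch:

// Hypothetical caller — only init and clearChatMessages come from the patch
const { nodeClass } = require('./OpenAIAssistant')
const assistantNode = new nodeClass()

// init returns the wrapper that owns clearChatMessages
const instance = await assistantNode.init(nodeData, '', { appDataSource, databaseEntities, chatId: 'chat-123', logger })
await instance.clearChatMessages() // deletes the OpenAI thread behind the session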
diff --git a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
index 96ba7ea38..781292d72 100644
--- a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
+++ b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
@@ -1,6 +1,6 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
+import { getBaseClasses } from '../../../src/utils'
import { BaseLanguageModel } from 'langchain/base_language'
import { flatten } from 'lodash'
import { BaseChatMemory } from 'langchain/memory'
@@ -56,8 +56,8 @@ class OpenAIFunctionAgent_Agents implements INode {
async init(nodeData: INodeData): Promise<any> {
const model = nodeData.inputs?.model as BaseLanguageModel
- const memory = nodeData.inputs?.memory as BaseChatMemory
const systemMessage = nodeData.inputs?.systemMessage as string
+ const memory = nodeData.inputs?.memory as BaseChatMemory
let tools = nodeData.inputs?.tools
tools = flatten(tools)
@@ -69,25 +69,23 @@ class OpenAIFunctionAgent_Agents implements INode {
prefix: systemMessage ?? `You are a helpful AI assistant.`
}
})
- if (memory) executor.memory = memory
-
+ executor.memory = memory
return executor
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const executor = nodeData.instance as AgentExecutor
- const memory = nodeData.inputs?.memory as BaseChatMemory
+ const memory = nodeData.inputs?.memory
+ memory.returnMessages = true // Return true for BaseChatModel
- if (options && options.chatHistory) {
- const chatHistoryClassName = memory.chatHistory.constructor.name
- // Only replace when it's In-Memory
- if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
- memory.chatHistory = mapChatHistory(options)
- executor.memory = memory
- }
+ /* When incomingInput.history is provided, only force-replace chatHistory if it's ShortTermMemory
+ * LongTermMemory will automatically retrieve chatHistory from sessionId
+ */
+ if (options && options.chatHistory && memory.isShortTermMemory) {
+ await memory.resumeMessages(options.chatHistory)
}
- ;(executor.memory as any).returnMessages = true // Return true for BaseChatModel
+ executor.memory = memory
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)

diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts
index 7887ce97b..aa9b1a8ac 100644
--- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts
+++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts
@@ -1,6 +1,6 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConversationChain } from 'langchain/chains'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
+import { getBaseClasses } from '../../../src/utils'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
import { BufferMemory } from 'langchain/memory'
import { BaseChatModel } from 'langchain/chat_models/base'
@@ -105,15 +105,14 @@ class ConversationChain_Chains implements INode {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const chain = nodeData.instance as ConversationChain
- const memory = nodeData.inputs?.memory as BufferMemory
+ const memory = nodeData.inputs?.memory
memory.returnMessages = true // Return true for BaseChatModel
- if (options && options.chatHistory) {
- const chatHistoryClassName = memory.chatHistory.constructor.name
- // Only replace when it's In-Memory
- if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
- memory.chatHistory = mapChatHistory(options)
- }
+ /* When incomingInput.history is provided, only force-replace chatHistory if it's ShortTermMemory
+ * LongTermMemory will automatically retrieve chatHistory from sessionId
+ */
+ if (options && options.chatHistory && memory.isShortTermMemory) {
+ await memory.resumeMessages(options.chatHistory)
}
chain.memory = memory
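These run methods now share one contract: a memory marked isShortTermMemory is rehydrated from the request payload via resumeMessages, while long-term stores reload their history by sessionId on their own. A rough sketch of that contract, assuming the IMessage shape from src/Interface:

// Illustrative history an API caller might send as incomingInput.history
const incomingHistory: IMessage[] = [
    { message: 'Hi, who are you?', type: 'userMessage' },
    { message: 'I am an AI assistant.', type: 'apiMessage' }
]

// Short-term (in-memory) stores replay the payload; long-term stores skip this
if ((memory as any).isShortTermMemory) {
    await (memory as any).resumeMessages(incomingHistory)
}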
diff --git a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
index 9a8c1b188..d8fb42257 100644
--- a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
+++ b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
@@ -1,9 +1,9 @@
import { BaseLanguageModel } from 'langchain/base_language'
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
+import { ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses } from '../../../src/utils'
import { ConversationalRetrievalQAChain, QAChainParams } from 'langchain/chains'
import { BaseRetriever } from 'langchain/schema/retriever'
-import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
+import { BufferMemoryInput, BufferMemory } from 'langchain/memory'
import { PromptTemplate } from 'langchain/prompts'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import {
@@ -158,7 +158,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
returnMessages: true
}
if (chainOption === 'refine') fields.outputKey = 'output_text'
- obj.memory = new BufferMemory(fields)
+ obj.memory = new BufferMemoryExtended(fields)
}
const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStoreRetriever, obj)
@@ -178,12 +178,11 @@ class ConversationalRetrievalQAChain_Chains implements INode {
const obj = {
question: input
}
- if (options && options.chatHistory && chain.memory) {
- const chatHistoryClassName = (chain.memory as any).chatHistory.constructor.name
- // Only replace when it's In-Memory
- if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
- ;(chain.memory as any).chatHistory = mapChatHistory(options)
- }
+ /* When incomingInput.history is provided, only force-replace chatHistory if it's ShortTermMemory
+ * LongTermMemory will automatically retrieve chatHistory from sessionId
+ */
+ if (options && options.chatHistory && chain.memory && (chain.memory as any).isShortTermMemory) {
+ await (chain.memory as any).resumeMessages(options.chatHistory)
}
const loggerHandler = new ConsoleCallbackHandler(options.logger)
@@ -216,4 +215,27 @@
}
}
+class BufferMemoryExtended extends BufferMemory {
+ isShortTermMemory = true
+
+ constructor(fields: BufferMemoryInput) {
+ super(fields)
+ }
+
+ async clearChatMessages(): Promise<void> {
+ await this.clear()
+ }
+
+ async resumeMessages(messages: IMessage[]): Promise<void> {
+ // Clear existing chatHistory to avoid duplication
+ if (messages.length) await this.clear()
+
+ // Insert into chatHistory
+ for (const msg of messages) {
+ if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
+ else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
+ }
+ }
+}
+
module.exports = { nodeClass: ConversationalRetrievalQAChain_Chains }
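The next file wires Azure credentials into LlamaIndex's OpenAI class through its azure config block. Pulled out of the node for clarity, the construction looks roughly like this; the endpoint, API version, and deployment name are placeholder values:

import { OpenAI } from 'llamaindex'

// Placeholder credential values for illustration only
const azureModel = new OpenAI({
    model: 'gpt-3.5-turbo-16k',
    temperature: 0.9,
    azure: {
        apiKey: process.env.AZURE_OPENAI_API_KEY,
        endpoint: 'https://my-instance.openai.azure.com',
        apiVersion: '2023-07-01-preview',
        deploymentName: 'my-deployment'
    }
})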
diff --git a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts
new file mode 100644
index 000000000..850c2bc59
--- /dev/null
+++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts
@@ -0,0 +1,135 @@
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex'
+
+interface AzureOpenAIConfig {
+ apiKey?: string
+ endpoint?: string
+ apiVersion?: string
+ deploymentName?: string
+}
+
+class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
+ label: string
+ name: string
+ version: number
+ type: string
+ icon: string
+ category: string
+ description: string
+ baseClasses: string[]
+ tags: string[]
+ credential: INodeParams
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'AzureChatOpenAI'
+ this.name = 'azureChatOpenAI_LlamaIndex'
+ this.version = 1.0
+ this.type = 'AzureChatOpenAI'
+ this.icon = 'Azure.svg'
+ this.category = 'Chat Models'
+ this.description = 'Wrapper around Azure OpenAI Chat LLM with LlamaIndex implementation'
+ this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)]
+ this.tags = ['LlamaIndex']
+ this.credential = {
+ label: 'Connect Credential',
+ name: 'credential',
+ type: 'credential',
+ credentialNames: ['azureOpenAIApi']
+ }
+ this.inputs = [
+ {
+ label: 'Model Name',
+ name: 'modelName',
+ type: 'options',
+ options: [
+ { label: 'gpt-4', name: 'gpt-4' },
+ { label: 'gpt-4-32k', name: 'gpt-4-32k' },
+ { label: 'gpt-3.5-turbo', name: 'gpt-3.5-turbo' },
+ { label: 'gpt-3.5-turbo-16k', name: 'gpt-3.5-turbo-16k' }
+ ],
+ default: 'gpt-3.5-turbo-16k',
+ optional: true
+ },
+ { label: 'Temperature', name: 'temperature', type: 'number', step: 0.1, default: 0.9, optional: true },
+ { label: 'Max Tokens', name: 'maxTokens', type: 'number', step: 1, optional: true, additionalParams: true },
+ { label: 'Top Probability', name: 'topP', type: 'number', step: 0.1, optional: true, additionalParams: true },
+ { label: 'Timeout', name: 'timeout', type: 'number', step: 1, optional: true, additionalParams: true }
+ ]
+ }
+
+ async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+ const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS
+ const temperature = nodeData.inputs?.temperature as string
+ const maxTokens = nodeData.inputs?.maxTokens as string
+ const topP = nodeData.inputs?.topP as string
+ const timeout = nodeData.inputs?.timeout as string
+
+ const credentialData = await getCredentialData(nodeData.credential ??
'', options) + const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData) + const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData) + const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) + const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) + + const obj: Partial & { azure?: AzureOpenAIConfig } = { + temperature: parseFloat(temperature), + model: modelName, + azure: { + apiKey: azureOpenAIApiKey, + endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`, + apiVersion: azureOpenAIApiVersion, + deploymentName: azureOpenAIApiDeploymentName + } + } + + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAI(obj) + return model + } +} + +module.exports = { nodeClass: AzureChatOpenAI_LlamaIndex_ChatModels } diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts index 358a15d1e..8209c04a2 100644 --- a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts @@ -3,6 +3,7 @@ import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../ import { AnthropicInput, ChatAnthropic } from 'langchain/chat_models/anthropic' import { BaseCache } from 'langchain/schema' import { BaseLLMParams } from 'langchain/llms/base' +import { availableModels } from './utils' class ChatAnthropic_ChatModels implements INode { label: string @@ -42,67 +43,7 @@ class ChatAnthropic_ChatModels implements INode { label: 'Model Name', name: 'modelName', type: 'options', - options: [ - { - label: 'claude-2', - name: 'claude-2', - description: 'Claude 2 latest major version, automatically get updates to the model as they are released' - }, - { - label: 'claude-2.1', - name: 'claude-2.1', - description: 'Claude 2 latest full version' - }, - { - label: 'claude-instant-1', - name: 'claude-instant-1', - description: 'Claude Instant latest major version, automatically get updates to the model as they are released' - }, - { - label: 'claude-v1', - name: 'claude-v1' - }, - { - label: 'claude-v1-100k', - name: 'claude-v1-100k' - }, - { - label: 'claude-v1.0', - name: 'claude-v1.0' - }, - { - label: 'claude-v1.2', - name: 'claude-v1.2' - }, - { - label: 'claude-v1.3', - name: 'claude-v1.3' - }, - { - label: 'claude-v1.3-100k', - name: 'claude-v1.3-100k' - }, - { - label: 'claude-instant-v1', - name: 'claude-instant-v1' - }, - { - label: 'claude-instant-v1-100k', - name: 'claude-instant-v1-100k' - }, - { - label: 'claude-instant-v1.0', - name: 'claude-instant-v1.0' - }, - { - label: 'claude-instant-v1.1', - name: 'claude-instant-v1.1' - }, - { - label: 'claude-instant-v1.1-100k', - name: 'claude-instant-v1.1-100k' - } - ], + options: [...availableModels], default: 'claude-2', optional: true }, diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts new file mode 100644 index 000000000..b989ef760 --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts @@ -0,0 +1,94 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { 
getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { Anthropic } from 'llamaindex' +import { availableModels } from './utils' + +class ChatAnthropic_LlamaIndex_ChatModels implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + tags: string[] + baseClasses: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'ChatAnthropic' + this.name = 'chatAnthropic_LlamaIndex' + this.version = 1.0 + this.type = 'ChatAnthropic' + this.icon = 'chatAnthropic.png' + this.category = 'Chat Models' + this.description = 'Wrapper around ChatAnthropic LLM with LlamaIndex implementation' + this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(Anthropic)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['anthropicApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [...availableModels], + default: 'claude-2', + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokensToSample', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top P', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const temperature = nodeData.inputs?.temperature as string + const modelName = nodeData.inputs?.modelName as string + const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string + const topP = nodeData.inputs?.topP as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData) + + const obj: Partial = { + temperature: parseFloat(temperature), + model: modelName, + apiKey: anthropicApiKey + } + + if (maxTokensToSample) obj.maxTokens = parseInt(maxTokensToSample, 10) + if (topP) obj.topP = parseFloat(topP) + + const model = new Anthropic(obj) + return model + } +} + +module.exports = { nodeClass: ChatAnthropic_LlamaIndex_ChatModels } diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts b/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts new file mode 100644 index 000000000..209996a69 --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts @@ -0,0 +1,61 @@ +export const availableModels = [ + { + label: 'claude-2', + name: 'claude-2', + description: 'Claude 2 latest major version, automatically get updates to the model as they are released' + }, + { + label: 'claude-2.1', + name: 'claude-2.1', + description: 'Claude 2 latest full version' + }, + { + label: 'claude-instant-1', + name: 'claude-instant-1', + description: 'Claude Instant latest major version, automatically get updates to the model as they are released' + }, + { + label: 'claude-v1', + name: 'claude-v1' + }, + { + label: 'claude-v1-100k', + name: 'claude-v1-100k' + }, + { + label: 'claude-v1.0', + name: 'claude-v1.0' + }, + { + label: 'claude-v1.2', + name: 'claude-v1.2' + }, + { + label: 'claude-v1.3', + name: 'claude-v1.3' + }, + { + label: 'claude-v1.3-100k', + name: 'claude-v1.3-100k' + }, + { + label: 'claude-instant-v1', + name: 'claude-instant-v1' + }, + { + label: 'claude-instant-v1-100k', + name: 'claude-instant-v1-100k' + }, + { + label: 'claude-instant-v1.0', + name: 'claude-instant-v1.0' + }, + { + label: 'claude-instant-v1.1', + name: 'claude-instant-v1.1' + }, + { + label: 'claude-instant-v1.1-100k', + name: 'claude-instant-v1.1-100k' + } +] diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts new file mode 100644 index 000000000..147bfe3ff --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts @@ -0,0 +1,148 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex' + +class ChatOpenAI_LlamaIndex_LLMs implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + tags: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'ChatOpenAI' + this.name = 'chatOpenAI_LlamaIndex' + this.version = 1.0 + this.type = 'ChatOpenAI' + this.icon = 'openai.png' + this.category = 'Chat Models' + this.description = 'Wrapper around OpenAI Chat LLM with LlamaIndex implementation' + this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['openAIApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'gpt-4', + name: 'gpt-4' + }, + { + label: 'gpt-4-1106-preview', + name: 'gpt-4-1106-preview' + }, + { + label: 'gpt-4-vision-preview', + name: 
'gpt-4-vision-preview' + }, + { + label: 'gpt-4-0613', + name: 'gpt-4-0613' + }, + { + label: 'gpt-4-32k', + name: 'gpt-4-32k' + }, + { + label: 'gpt-4-32k-0613', + name: 'gpt-4-32k-0613' + }, + { + label: 'gpt-3.5-turbo', + name: 'gpt-3.5-turbo' + }, + { + label: 'gpt-3.5-turbo-1106', + name: 'gpt-3.5-turbo-1106' + }, + { + label: 'gpt-3.5-turbo-0613', + name: 'gpt-3.5-turbo-0613' + }, + { + label: 'gpt-3.5-turbo-16k', + name: 'gpt-3.5-turbo-16k' + }, + { + label: 'gpt-3.5-turbo-16k-0613', + name: 'gpt-3.5-turbo-16k-0613' + } + ], + default: 'gpt-3.5-turbo', + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top Probability', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Timeout', + name: 'timeout', + type: 'number', + step: 1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const temperature = nodeData.inputs?.temperature as string + const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) + + const obj: Partial = { + temperature: parseFloat(temperature), + model: modelName, + apiKey: openAIApiKey + } + + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAI(obj) + return model + } +} + +module.exports = { nodeClass: ChatOpenAI_LlamaIndex_LLMs } diff --git a/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts b/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts new file mode 100644 index 000000000..38e454027 --- /dev/null +++ b/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts @@ -0,0 +1,77 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAIEmbedding } from 'llamaindex' + +interface AzureOpenAIConfig { + apiKey?: string + endpoint?: string + apiVersion?: string + deploymentName?: string +} + +class AzureOpenAIEmbedding_LlamaIndex_Embeddings implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + credential: INodeParams + tags: string[] + inputs: INodeParams[] + + constructor() { + this.label = 'Azure OpenAI Embeddings' + this.name = 'azureOpenAIEmbeddingsLlamaIndex' + this.version = 1.0 + this.type = 'AzureOpenAIEmbeddings' + this.icon = 'Azure.svg' + this.category = 'Embeddings' + this.description = 'Azure OpenAI API embeddings with LlamaIndex implementation' + this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 
'credential', + credentialNames: ['azureOpenAIApi'] + } + this.inputs = [ + { + label: 'Timeout', + name: 'timeout', + type: 'number', + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData) + const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData) + const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) + const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) + + const obj: Partial & { azure?: AzureOpenAIConfig } = { + azure: { + apiKey: azureOpenAIApiKey, + endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`, + apiVersion: azureOpenAIApiVersion, + deploymentName: azureOpenAIApiDeploymentName + } + } + + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAIEmbedding(obj) + return model + } +} + +module.exports = { nodeClass: AzureOpenAIEmbedding_LlamaIndex_Embeddings } diff --git a/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts new file mode 100644 index 000000000..4ff780e32 --- /dev/null +++ b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts @@ -0,0 +1,68 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAIEmbedding } from 'llamaindex' + +class OpenAIEmbedding_LlamaIndex_Embeddings implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + tags: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'OpenAI Embedding' + this.name = 'openAIEmbedding_LlamaIndex' + this.version = 1.0 + this.type = 'OpenAIEmbedding' + this.icon = 'openai.png' + this.category = 'Embeddings' + this.description = 'OpenAI Embedding with LlamaIndex implementation' + this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['openAIApi'] + } + this.inputs = [ + { + label: 'Timeout', + name: 'timeout', + type: 'number', + optional: true, + additionalParams: true + }, + { + label: 'BasePath', + name: 'basepath', + type: 'string', + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) + + const obj: Partial = { + apiKey: openAIApiKey + } + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAIEmbedding(obj) + return model + } +} + +module.exports = { nodeClass: OpenAIEmbedding_LlamaIndex_Embeddings } diff --git a/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts b/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts new file mode 100644 index 000000000..dd77601a8 --- /dev/null +++ b/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts @@ -0,0 +1,178 @@ +import { ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ContextChatEngine, ChatMessage } from 'llamaindex' + +class ContextChatEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Context Chat Engine' + this.name = 'contextChatEngine' + this.version = 1.0 + this.type = 'ContextChatEngine' + this.icon = 'context-chat-engine.png' + this.category = 'Engine' + this.description = 'Answer question based on retrieved documents (context) with built-in memory to remember conversation' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Vector Store Retriever', + name: 'vectorStoreRetriever', + type: 'VectorIndexRetriever' + }, + { + label: 'Memory', + name: 'memory', + type: 'BaseChatMemory' + }, + { + label: 'System Message', + name: 'systemMessagePrompt', + type: 'string', + rows: 4, + optional: true, + placeholder: + 'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.' 
+ } + ] + } + + async init(nodeData: INodeData): Promise { + const model = nodeData.inputs?.model + const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever + const memory = nodeData.inputs?.memory + + const chatEngine = new ContextChatEngine({ chatModel: model, retriever: vectorStoreRetriever }) + ;(chatEngine as any).memory = memory + return chatEngine + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const chatEngine = nodeData.instance as ContextChatEngine + const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string + const memory = nodeData.inputs?.memory + + const chatHistory = [] as ChatMessage[] + + let sessionId = '' + if (memory) { + if (memory.isSessionIdUsingChatMessageId) sessionId = options.chatId + else sessionId = nodeData.inputs?.sessionId + } + + if (systemMessagePrompt) { + chatHistory.push({ + content: systemMessagePrompt, + role: 'user' + }) + } + + /* When incomingInput.history is provided, only force replace chatHistory if its ShortTermMemory + * LongTermMemory will automatically retrieved chatHistory from sessionId + */ + if (options && options.chatHistory && memory.isShortTermMemory) { + await memory.resumeMessages(options.chatHistory) + } + + const msgs: IMessage[] = await memory.getChatMessages(sessionId) + for (const message of msgs) { + if (message.type === 'apiMessage') { + chatHistory.push({ + content: message.message, + role: 'assistant' + }) + } else if (message.type === 'userMessage') { + chatHistory.push({ + content: message.message, + role: 'user' + }) + } + } + + if (options.socketIO && options.socketIOClientId) { + let response = '' + const stream = await chatEngine.chat(input, chatHistory, true) + let isStart = true + const onNextPromise = () => { + return new Promise((resolve, reject) => { + const onNext = async () => { + try { + const { value, done } = await stream.next() + if (!done) { + if (isStart) { + options.socketIO.to(options.socketIOClientId).emit('start') + isStart = false + } + options.socketIO.to(options.socketIOClientId).emit('token', value) + response += value + onNext() + } else { + resolve(response) + } + } catch (error) { + reject(error) + } + } + onNext() + }) + } + + try { + const result = await onNextPromise() + if (memory) { + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: result, + type: 'apiMessage' + } + ], + sessionId + ) + } + return result as string + } catch (error) { + throw new Error(error) + } + } else { + const response = await chatEngine.chat(input, chatHistory) + if (memory) { + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: response?.response, + type: 'apiMessage' + } + ], + sessionId + ) + } + return response?.response + } + } +} + +module.exports = { nodeClass: ContextChatEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts b/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts new file mode 100644 index 000000000..9ae9c2f1a --- /dev/null +++ b/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts @@ -0,0 +1,171 @@ +import { ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ChatMessage, SimpleChatEngine } from 'llamaindex' + +class SimpleChatEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: 
diff --git a/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts b/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts
new file mode 100644
index 000000000..9ae9c2f1a
--- /dev/null
+++ b/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts
@@ -0,0 +1,171 @@
+import { ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
+import { ChatMessage, SimpleChatEngine } from 'llamaindex'
+
+class SimpleChatEngine_LlamaIndex implements INode {
+ label: string
+ name: string
+ version: number
+ description: string
+ type: string
+ icon: string
+ category: string
+ baseClasses: string[]
+ tags: string[]
+ inputs: INodeParams[]
+ outputs: INodeOutputsValue[]
+
+ constructor() {
+ this.label = 'Simple Chat Engine'
+ this.name = 'simpleChatEngine'
+ this.version = 1.0
+ this.type = 'SimpleChatEngine'
+ this.icon = 'chat-engine.png'
+ this.category = 'Engine'
+ this.description = 'Simple engine to handle back and forth conversations'
+ this.baseClasses = [this.type]
+ this.tags = ['LlamaIndex']
+ this.inputs = [
+ { label: 'Chat Model', name: 'model', type: 'BaseChatModel_LlamaIndex' },
+ { label: 'Memory', name: 'memory', type: 'BaseChatMemory' },
+ {
+ label: 'System Message',
+ name: 'systemMessagePrompt',
+ type: 'string',
+ rows: 4,
+ optional: true,
+ placeholder: 'You are a helpful assistant'
+ }
+ ]
+ }
+
+ async init(nodeData: INodeData): Promise<any> {
+ const model = nodeData.inputs?.model
+ const memory = nodeData.inputs?.memory
+
+ const chatEngine = new SimpleChatEngine({ llm: model })
+ ;(chatEngine as any).memory = memory
+ return chatEngine
+ }
+
+ async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+ const chatEngine = nodeData.instance as SimpleChatEngine
+ const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
+ const memory = nodeData.inputs?.memory
+
+ const chatHistory = [] as ChatMessage[]
+
+ let sessionId = ''
+ if (memory) {
+ if (memory.isSessionIdUsingChatMessageId) sessionId = options.chatId
+ else sessionId = nodeData.inputs?.sessionId
+ }
+
+ if (systemMessagePrompt) {
+ chatHistory.push({ content: systemMessagePrompt, role: 'user' })
+ }
+
+ /* When incomingInput.history is provided, only force-replace chatHistory if it's ShortTermMemory
+ * LongTermMemory will automatically retrieve chatHistory from sessionId
+ */
+ if (options && options.chatHistory && memory.isShortTermMemory) {
+ await memory.resumeMessages(options.chatHistory)
+ }
+
+ const msgs: IMessage[] = await memory.getChatMessages(sessionId)
+ for (const message of msgs) {
+ if (message.type === 'apiMessage') {
+ chatHistory.push({ content: message.message, role: 'assistant' })
+ } else if (message.type === 'userMessage') {
+ chatHistory.push({ content: message.message, role: 'user' })
+ }
+ }
+
+ if (options.socketIO && options.socketIOClientId) {
+ let response = ''
+ const stream = await chatEngine.chat(input, chatHistory, true)
+ let isStart = true
+ const onNextPromise = () => {
+ return new Promise((resolve, reject) => {
+ const onNext = async () => {
+ try {
+ const { value, done } = await stream.next()
+ if (!done) {
+ if (isStart) {
+ options.socketIO.to(options.socketIOClientId).emit('start')
+ isStart = false
+ }
+ options.socketIO.to(options.socketIOClientId).emit('token', value)
+ response += value
+ onNext()
+ } else {
+ resolve(response)
+ }
+ } catch (error) {
+ reject(error)
+ }
+ }
+ onNext()
+ })
+ }
+
+ try {
+ const result = await onNextPromise()
+ if (memory) {
+ await memory.addChatMessages(
+ [
+ { text: input, type: 'userMessage' },
+ { text: result, type: 'apiMessage' }
+ ],
+ sessionId
+ )
+ }
+ return result as string
+ } catch (error) {
+ throw new Error(error)
+ }
+ } else {
+ const response = await chatEngine.chat(input, chatHistory)
+ if (memory) {
+ await memory.addChatMessages(
+ [
+ { text: input, type: 'userMessage' },
+ { text: response?.response, type: 'apiMessage' }
+ ],
+ sessionId
+ )
+ }
+ return response?.response
+ }
+ }
+}
+
+module.exports = { nodeClass: SimpleChatEngine_LlamaIndex }
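For context, the LlamaIndex engine this node wraps can be exercised on its own with the same calls run() makes; the model choice and prompts below are arbitrary examples:

import { OpenAI, SimpleChatEngine } from 'llamaindex'

// Minimal standalone sketch of the engine the node wraps
const llm = new OpenAI({ model: 'gpt-3.5-turbo', temperature: 0.9 })
const engine = new SimpleChatEngine({ llm })

// chat(input, chatHistory) mirrors the node's non-streaming branch
const reply = await engine.chat('What can you do?', [{ content: 'You are a helpful assistant', role: 'user' }])
console.log(reply.response)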
diff --git a/packages/components/nodes/engine/ChatEngine/chat-engine.png b/packages/components/nodes/engine/ChatEngine/chat-engine.png
new file mode 100644
index 0000000000000000000000000000000000000000..d614b8887ee270317dd5e98041da40cca59d9043
GIT binary patch
[binary patch data omitted: chat-engine.png (10012 bytes) and context-chat-engine.png (9768 bytes) icon images]
[GIT binary patch payloads omitted: chat-engine.png, context-chat-engine.png]
diff --git a/packages/components/nodes/engine/QueryEngine/QueryEngine.ts b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts
new file mode 100644
index 000000000..7059c2606
--- /dev/null
+++ b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts
@@ -0,0 +1,126 @@
+import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
+import {
+    RetrieverQueryEngine,
+    BaseNode,
+    Metadata,
+    ResponseSynthesizer,
+    CompactAndRefine,
+    TreeSummarize,
+    Refine,
+    SimpleResponseBuilder
+} from 'llamaindex'
+
+class QueryEngine_LlamaIndex implements INode {
+    label: string
+    name: string
+    version: number
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    tags: string[]
+    inputs: INodeParams[]
+    outputs: INodeOutputsValue[]
+
+    constructor() {
+        this.label = 'Query Engine'
+        this.name = 'queryEngine'
+        this.version = 1.0
+        this.type = 'QueryEngine'
+        this.icon = 'query-engine.png'
+        this.category = 'Engine'
+        this.description = 'Simple query engine built to answer questions over your data, without memory'
+        this.baseClasses = [this.type]
+        this.tags = ['LlamaIndex']
+        this.inputs = [
+            {
+                label: 'Vector Store Retriever',
+                name: 'vectorStoreRetriever',
+                type: 'VectorIndexRetriever'
+            },
+            {
+                label: 'Response Synthesizer',
+                name: 'responseSynthesizer',
+                type: 'ResponseSynthesizer',
+                description:
+                    'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See more',
+                optional: true
+            },
+            {
+                label: 'Return Source Documents',
+                name: 'returnSourceDocuments',
+                type: 'boolean',
+                optional: true
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData): Promise<any> {
+        const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever
+        const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer
+
+        if (responseSynthesizerObj) {
+            if (responseSynthesizerObj.type === 'TreeSummarize') {
+                const responseSynthesizer = new ResponseSynthesizer({
+                    responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext, responseSynthesizerObj.textQAPromptTemplate),
+                    serviceContext: vectorStoreRetriever.serviceContext
+                })
+                return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
+            } else if (responseSynthesizerObj.type === 'CompactAndRefine') {
+                const responseSynthesizer = new ResponseSynthesizer({
+                    responseBuilder: new CompactAndRefine(
+                        vectorStoreRetriever.serviceContext,
+                        responseSynthesizerObj.textQAPromptTemplate,
+                        responseSynthesizerObj.refinePromptTemplate
+                    ),
+                    serviceContext: vectorStoreRetriever.serviceContext
+                })
+                return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
+            } else if (responseSynthesizerObj.type === 'Refine') {
+                const responseSynthesizer = new ResponseSynthesizer({
+                    responseBuilder: new Refine(
+                        vectorStoreRetriever.serviceContext,
+                        responseSynthesizerObj.textQAPromptTemplate,
+                        responseSynthesizerObj.refinePromptTemplate
+                    ),
+                    serviceContext: vectorStoreRetriever.serviceContext
+                })
+                return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
+            } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') {
+                const responseSynthesizer = new ResponseSynthesizer({
+                    responseBuilder: new SimpleResponseBuilder(vectorStoreRetriever.serviceContext),
+                    serviceContext: vectorStoreRetriever.serviceContext
+                })
+                return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
+            }
+        }
+
+        const queryEngine = new RetrieverQueryEngine(vectorStoreRetriever)
+        return queryEngine
+    }
+
+    async run(nodeData: INodeData, input: string): Promise<any> {
+        const queryEngine = nodeData.instance as RetrieverQueryEngine
+        const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
+
+        const response = await queryEngine.query(input)
+        if (returnSourceDocuments && response.sourceNodes?.length)
+            return { text: response?.response, sourceDocuments: reformatSourceDocuments(response.sourceNodes) }
+
+        return response?.response
+    }
+}
+
+const reformatSourceDocuments = (sourceNodes: BaseNode<Metadata>[]) => {
+    const sourceDocuments = []
+    for (const node of sourceNodes) {
+        sourceDocuments.push({
+            pageContent: (node as any).text,
+            metadata: node.metadata
+        })
+    }
+    return sourceDocuments
+}
+
+module.exports = { nodeClass: QueryEngine_LlamaIndex }
diff --git a/packages/components/nodes/engine/QueryEngine/query-engine.png b/packages/components/nodes/engine/QueryEngine/query-engine.png
new file mode 100644
index 0000000000000000000000000000000000000000..68efdbe0090cb34c90444842d256b0a52f17b4b9
GIT binary patch
[binary PNG payload omitted: query-engine.png]
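Reviewer note, not part of the patch: reformatSourceDocuments in the QueryEngine node above maps LlamaIndex source nodes onto the LangChain-style { pageContent, metadata } objects the existing UI already renders. A minimal standalone TypeScript sketch of that mapping, using hypothetical sample data instead of real BaseNode instances:

    // Standalone sketch (hypothetical data): the shape produced by reformatSourceDocuments.
    type SourceDocument = { pageContent: string; metadata: Record<string, unknown> }

    const sampleNodes = [{ text: 'LlamaIndex is a data framework for LLM apps.', metadata: { source: 'intro.md' } }]

    const sourceDocuments: SourceDocument[] = sampleNodes.map((node) => ({
        pageContent: node.text,
        metadata: node.metadata
    }))

    console.log(sourceDocuments)
    // [ { pageContent: 'LlamaIndex is a data framework for LLM apps.', metadata: { source: 'intro.md' } } ]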
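Reviewer note, not part of the patch: every memory node changed below swaps its plain LangChain memory class for an "Extended" subclass exposing the same core methods (getChatMessages, addChatMessages, clearChatMessages; the in-process memories also add resumeMessages) so the server can read and write session history directly. A condensed standalone TypeScript sketch of that contract against a plain in-memory store; the types here are simplified stand-ins for the ones in src/Interface.ts:

    // Standalone sketch: the method contract shared by the Extended memory classes below.
    type MessageType = 'userMessage' | 'apiMessage'
    interface IMessage { message: string; type: MessageType }

    class InMemoryChatStore {
        private messages: IMessage[] = []

        async getChatMessages(): Promise<IMessage[]> {
            return this.messages
        }

        async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise<void> {
            for (const msg of msgArray) this.messages.push({ message: msg.text, type: msg.type })
        }

        async clearChatMessages(): Promise<void> {
            this.messages = []
        }

        async resumeMessages(messages: IMessage[]): Promise<void> {
            // Clear first to avoid duplication, mirroring the implementations below.
            if (messages.length) await this.clearChatMessages()
            this.messages.push(...messages)
        }
    }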
z&k=t_TebF4FyH1ZK@38Icg(eY#v~JyOyLyH(1W!(TJY^i0uNpna9;Jc1X(=)h3;{N zD8eV8iqMI~-~HzB>SLX#TJEdb^htiQHu*YvlpIG!hMCL%7w^LVebGK_i4DOnq))z+ U)lvliIRL;Ini-Uxb-4Gx0Is0pTL1t6 literal 0 HcmV?d00001 diff --git a/packages/components/nodes/memory/BufferMemory/BufferMemory.ts b/packages/components/nodes/memory/BufferMemory/BufferMemory.ts index 7793d96d4..5310f88db 100644 --- a/packages/components/nodes/memory/BufferMemory/BufferMemory.ts +++ b/packages/components/nodes/memory/BufferMemory/BufferMemory.ts @@ -1,6 +1,6 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses } from '../../../src/utils' -import { BufferMemory } from 'langchain/memory' +import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils' +import { BufferMemory, BufferMemoryInput } from 'langchain/memory' class BufferMemory_Memory implements INode { label: string @@ -41,7 +41,7 @@ class BufferMemory_Memory implements INode { async init(nodeData: INodeData): Promise { const memoryKey = nodeData.inputs?.memoryKey as string const inputKey = nodeData.inputs?.inputKey as string - return new BufferMemory({ + return new BufferMemoryExtended({ returnMessages: true, memoryKey, inputKey @@ -49,4 +49,43 @@ class BufferMemory_Memory implements INode { } } +class BufferMemoryExtended extends BufferMemory { + isShortTermMemory = true + + constructor(fields: BufferMemoryInput) { + super(fields) + } + + async getChatMessages(): Promise { + const memoryResult = await this.loadMemoryVariables({}) + const baseMessages = memoryResult[this.memoryKey ?? 'chat_history'] + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise { + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + const inputValues = { [this.inputKey ?? 
diff --git a/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts b/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts
index 84e607e54..9915d48d9 100644
--- a/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts
+++ b/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts
@@ -1,5 +1,5 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses } from '../../../src/utils'
+import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface'
+import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
 import { BufferWindowMemory, BufferWindowMemoryInput } from 'langchain/memory'
 
 class BufferWindowMemory_Memory implements INode {
@@ -57,7 +57,46 @@ class BufferWindowMemory_Memory implements INode {
             k: parseInt(k, 10)
         }
 
-        return new BufferWindowMemory(obj)
+        return new BufferWindowMemoryExtended(obj)
     }
 }
 
+class BufferWindowMemoryExtended extends BufferWindowMemory {
+    isShortTermMemory = true
+
+    constructor(fields: BufferWindowMemoryInput) {
+        super(fields)
+    }
+
+    async getChatMessages(): Promise<IMessage[]> {
+        const memoryResult = await this.loadMemoryVariables({})
+        const baseMessages = memoryResult[this.memoryKey ?? 'chat_history']
+        return convertBaseMessagetoIMessage(baseMessages)
+    }
+
+    async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise<void> {
+        const input = msgArray.find((msg) => msg.type === 'userMessage')
+        const output = msgArray.find((msg) => msg.type === 'apiMessage')
+
+        const inputValues = { [this.inputKey ??
'input']: input?.text } + const outputValues = { output: output?.text } + + await this.saveContext(inputValues, outputValues) + } + + async clearChatMessages(): Promise { + await this.clear() + } + + async resumeMessages(messages: IMessage[]): Promise { + // Clear existing chatHistory to avoid duplication + if (messages.length) await this.clear() + + // Insert into chatHistory + for (const msg of messages) { + if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) + else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) + } } } diff --git a/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts b/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts index 332d73aa9..e88beb139 100644 --- a/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts +++ b/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts @@ -1,5 +1,5 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses } from '../../../src/utils' +import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils' import { ConversationSummaryMemory, ConversationSummaryMemoryInput } from 'langchain/memory' import { BaseLanguageModel } from 'langchain/base_language' @@ -56,7 +56,50 @@ class ConversationSummaryMemory_Memory implements INode { inputKey } - return new ConversationSummaryMemory(obj) + return new ConversationSummaryMemoryExtended(obj) + } +} + +class ConversationSummaryMemoryExtended extends ConversationSummaryMemory { + isShortTermMemory = true + + constructor(fields: ConversationSummaryMemoryInput) { + super(fields) + } + + async getChatMessages(): Promise { + const memoryResult = await this.loadMemoryVariables({}) + const baseMessages = memoryResult[this.memoryKey ?? 'chat_history'] + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise { + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + const inputValues = { [this.inputKey ?? 
'input']: input?.text } + const outputValues = { output: output?.text } + + await this.saveContext(inputValues, outputValues) + } + + async clearChatMessages(): Promise { + await this.clear() + } + + async resumeMessages(messages: IMessage[]): Promise { + // Clear existing chatHistory to avoid duplication + if (messages.length) await this.clear() + + // Insert into chatHistory + for (const msg of messages) { + if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) + else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) + } + + // Replace buffer + const chatMessages = await this.chatHistory.getMessages() + this.buffer = await this.predictNewSummary(chatMessages.slice(-2), this.buffer) } } diff --git a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts index 8ca6cf9e5..a1c44554e 100644 --- a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts +++ b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts @@ -1,15 +1,19 @@ import { - ICommonObject, - INode, - INodeData, - INodeParams, - getBaseClasses, - getCredentialData, - getCredentialParam, - serializeChatHistory -} from '../../../src' + DynamoDBClient, + DynamoDBClientConfig, + GetItemCommand, + GetItemCommandInput, + UpdateItemCommand, + UpdateItemCommandInput, + DeleteItemCommand, + DeleteItemCommandInput, + AttributeValue +} from '@aws-sdk/client-dynamodb' import { DynamoDBChatMessageHistory } from 'langchain/stores/message/dynamodb' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' +import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, StoredMessage } from 'langchain/schema' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { ICommonObject, IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' class DynamoDb_Memory implements INode { label: string @@ -60,7 +64,8 @@ class DynamoDb_Memory implements INode { label: 'Session ID', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -78,73 +83,205 @@ class DynamoDb_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initalizeDynamoDB(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const dynamodbMemory = await initalizeDynamoDB(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing DynamoDb memory session ${sessionId ? sessionId : chatId}`) - await dynamodbMemory.clear() - options.logger.info(`Successfully cleared DynamoDb memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const dynamodbMemory = await initalizeDynamoDB(nodeData, options) - const key = memoryKey ?? 
'chat_history' - const memoryResult = await dynamodbMemory.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): Promise => { const tableName = nodeData.inputs?.tableName as string const partitionKey = nodeData.inputs?.partitionKey as string - const sessionId = nodeData.inputs?.sessionId as string const region = nodeData.inputs?.region as string const memoryKey = nodeData.inputs?.memoryKey as string const chatId = options.chatId let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? '', options) const accessKeyId = getCredentialParam('accessKey', credentialData, nodeData) const secretAccessKey = getCredentialParam('secretAccessKey', credentialData, nodeData) + const config: DynamoDBClientConfig = { + region, + credentials: { + accessKeyId, + secretAccessKey + } + } + + const client = new DynamoDBClient(config ?? {}) + const dynamoDb = new DynamoDBChatMessageHistory({ tableName, partitionKey, sessionId: sessionId ? sessionId : chatId, - config: { - region, - credentials: { - accessKeyId, - secretAccessKey - } - } + config }) const memory = new BufferMemoryExtended({ memoryKey: memoryKey ?? 'chat_history', chatHistory: dynamoDb, - isSessionIdUsingChatMessageId + isSessionIdUsingChatMessageId, + sessionId, + dynamodbClient: client }) return memory } interface BufferMemoryExtendedInput { isSessionIdUsingChatMessageId: boolean + dynamodbClient: DynamoDBClient + sessionId: string +} + +interface DynamoDBSerializedChatMessage { + M: { + type: { + S: string + } + text: { + S: string + } + role?: { + S: string + } + } } class BufferMemoryExtended extends BufferMemory { - isSessionIdUsingChatMessageId? = false + isSessionIdUsingChatMessageId = false + sessionId = '' + dynamodbClient: DynamoDBClient - constructor(fields: BufferMemoryInput & Partial) { + constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId + this.sessionId = fields.sessionId + this.dynamodbClient = fields.dynamodbClient + } + + overrideDynamoKey(overrideSessionId = '') { + const existingDynamoKey = (this as any).dynamoKey + const partitionKey = (this as any).partitionKey + + let newDynamoKey: Record = {} + + if (Object.keys(existingDynamoKey).includes(partitionKey)) { + newDynamoKey[partitionKey] = { S: overrideSessionId } + } + + return Object.keys(newDynamoKey).length ? 
newDynamoKey : existingDynamoKey + } + + async addNewMessage( + messages: StoredMessage[], + client: DynamoDBClient, + tableName = '', + dynamoKey: Record = {}, + messageAttributeName = 'messages' + ) { + const params: UpdateItemCommandInput = { + TableName: tableName, + Key: dynamoKey, + ExpressionAttributeNames: { + '#m': messageAttributeName + }, + ExpressionAttributeValues: { + ':empty_list': { + L: [] + }, + ':m': { + L: messages.map((message) => { + const dynamoSerializedMessage: DynamoDBSerializedChatMessage = { + M: { + type: { + S: message.type + }, + text: { + S: message.data.content + } + } + } + if (message.data.role) { + dynamoSerializedMessage.M.role = { S: message.data.role } + } + return dynamoSerializedMessage + }) + } + }, + UpdateExpression: 'SET #m = list_append(if_not_exists(#m, :empty_list), :m)' + } + + await client.send(new UpdateItemCommand(params)) + } + + async getChatMessages(overrideSessionId = ''): Promise { + if (!this.dynamodbClient) return [] + + const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey + const tableName = (this as any).tableName + const messageAttributeName = (this as any).messageAttributeName + + const params: GetItemCommandInput = { + TableName: tableName, + Key: dynamoKey + } + + const response = await this.dynamodbClient.send(new GetItemCommand(params)) + const items = response.Item ? response.Item[messageAttributeName]?.L ?? [] : [] + const messages = items + .map((item) => ({ + type: item.M?.type.S, + data: { + role: item.M?.role?.S, + content: item.M?.text.S + } + })) + .filter((x): x is StoredMessage => x.type !== undefined && x.data.content !== undefined) + const baseMessages = messages.map(mapStoredMessageToChatMessage) + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { + if (!this.dynamodbClient) return + + const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey + const tableName = (this as any).tableName + const messageAttributeName = (this as any).messageAttributeName + + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + if (input) { + const newInputMessage = new HumanMessage(input.text) + const messageToAdd = [newInputMessage].map((msg) => msg.toDict()) + await this.addNewMessage(messageToAdd, this.dynamodbClient, tableName, dynamoKey, messageAttributeName) + } + + if (output) { + const newOutputMessage = new AIMessage(output.text) + const messageToAdd = [newOutputMessage].map((msg) => msg.toDict()) + await this.addNewMessage(messageToAdd, this.dynamodbClient, tableName, dynamoKey, messageAttributeName) + } + } + + async clearChatMessages(overrideSessionId = ''): Promise { + if (!this.dynamodbClient) return + + const dynamoKey = overrideSessionId ? 
this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey + const tableName = (this as any).tableName + + const params: DeleteItemCommandInput = { + TableName: tableName, + Key: dynamoKey + } + await this.dynamodbClient.send(new DeleteItemCommand(params)) + await this.clear() } } diff --git a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts index 76cb7e313..8bebcfad1 100644 --- a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts +++ b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts @@ -1,17 +1,9 @@ -import { - getBaseClasses, - getCredentialData, - getCredentialParam, - ICommonObject, - INode, - INodeData, - INodeParams, - serializeChatHistory -} from '../../../src' +import { MongoClient, Collection, Document } from 'mongodb' import { MongoDBChatMessageHistory } from 'langchain/stores/message/mongodb' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' -import { BaseMessage, mapStoredMessageToChatMessage } from 'langchain/schema' -import { MongoClient } from 'mongodb' +import { BaseMessage, mapStoredMessageToChatMessage, AIMessage, HumanMessage } from 'langchain/schema' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { ICommonObject, IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' class MongoDB_Memory implements INode { label: string @@ -57,7 +49,8 @@ class MongoDB_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -75,44 +68,33 @@ class MongoDB_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initializeMongoDB(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const mongodbMemory = await initializeMongoDB(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing MongoDB memory session ${sessionId ? sessionId : chatId}`) - await mongodbMemory.clear() - options.logger.info(`Successfully cleared MongoDB memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const mongodbMemory = await initializeMongoDB(nodeData, options) - const key = memoryKey ?? 
'chat_history' - const memoryResult = await mongodbMemory.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initializeMongoDB = async (nodeData: INodeData, options: ICommonObject): Promise => { const databaseName = nodeData.inputs?.databaseName as string const collectionName = nodeData.inputs?.collectionName as string - const sessionId = nodeData.inputs?.sessionId as string const memoryKey = nodeData.inputs?.memoryKey as string const chatId = options?.chatId as string let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? '', options) let mongoDBConnectUrl = getCredentialParam('mongoDBConnectUrl', credentialData, nodeData) const client = new MongoClient(mongoDBConnectUrl) await client.connect() + const collection = client.db(databaseName).collection(collectionName) + /**** Methods below are needed to override the original implementations ****/ const mongoDBChatMessageHistory = new MongoDBChatMessageHistory({ collection, sessionId: sessionId ? sessionId : chatId @@ -140,24 +122,83 @@ const initializeMongoDB = async (nodeData: INodeData, options: ICommonObject): P mongoDBChatMessageHistory.clear = async (): Promise => { await collection.deleteOne({ sessionId: (mongoDBChatMessageHistory as any).sessionId }) } + /**** End of override functions ****/ return new BufferMemoryExtended({ memoryKey: memoryKey ?? 'chat_history', chatHistory: mongoDBChatMessageHistory, - isSessionIdUsingChatMessageId + isSessionIdUsingChatMessageId, + sessionId, + collection }) } interface BufferMemoryExtendedInput { isSessionIdUsingChatMessageId: boolean + collection: Collection + sessionId: string } class BufferMemoryExtended extends BufferMemory { - isSessionIdUsingChatMessageId? = false + isSessionIdUsingChatMessageId = false + sessionId = '' + collection: Collection - constructor(fields: BufferMemoryInput & Partial) { + constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId + this.sessionId = fields.sessionId + this.collection = fields.collection + } + + async getChatMessages(overrideSessionId = ''): Promise { + if (!this.collection) return [] + + const id = overrideSessionId ?? this.sessionId + const document = await this.collection.findOne({ sessionId: id }) + const messages = document?.messages || [] + const baseMessages = messages.map(mapStoredMessageToChatMessage) + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { + if (!this.collection) return + + const id = overrideSessionId ?? 
this.sessionId + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + if (input) { + const newInputMessage = new HumanMessage(input.text) + const messageToAdd = [newInputMessage].map((msg) => msg.toDict()) + await this.collection.updateOne( + { sessionId: id }, + { + $push: { messages: { $each: messageToAdd } } + }, + { upsert: true } + ) + } + + if (output) { + const newOutputMessage = new AIMessage(output.text) + const messageToAdd = [newOutputMessage].map((msg) => msg.toDict()) + await this.collection.updateOne( + { sessionId: id }, + { + $push: { messages: { $each: messageToAdd } } + }, + { upsert: true } + ) + } + } + + async clearChatMessages(overrideSessionId = ''): Promise { + if (!this.collection) return + + const id = overrideSessionId ?? this.sessionId + await this.collection.deleteOne({ sessionId: id }) + await this.clear() } } diff --git a/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts b/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts index 9cdbcd5cc..6dd949446 100644 --- a/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts +++ b/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts @@ -1,9 +1,9 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { ICommonObject } from '../../../src' import { MotorheadMemory, MotorheadMemoryInput } from 'langchain/memory' import fetch from 'node-fetch' -import { getBufferString } from 'langchain/memory' +import { InputValues, MemoryVariables, OutputValues } from 'langchain/memory' class MotorMemory_Memory implements INode { label: string @@ -46,7 +46,8 @@ class MotorMemory_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -64,35 +65,22 @@ class MotorMemory_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initalizeMotorhead(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const motorhead = await initalizeMotorhead(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Motorhead memory session ${sessionId ? sessionId : chatId}`) - await motorhead.clear() - options.logger.info(`Successfully cleared Motorhead memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const motorhead = await initalizeMotorhead(nodeData, options) - const key = memoryKey ?? 
'chat_history' - const memoryResult = await motorhead.loadMemoryVariables({}) - return getBufferString(memoryResult[key]) - } - } } const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject): Promise => { const memoryKey = nodeData.inputs?.memoryKey as string const baseURL = nodeData.inputs?.baseURL as string - const sessionId = nodeData.inputs?.sessionId as string const chatId = options?.chatId as string let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? '', options) const apiKey = getCredentialParam('apiKey', credentialData, nodeData) @@ -100,8 +88,9 @@ const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject): let obj: MotorheadMemoryInput & Partial = { returnMessages: true, - sessionId: sessionId ? sessionId : chatId, - memoryKey + sessionId, + memoryKey, + isSessionIdUsingChatMessageId } if (baseURL) { @@ -117,8 +106,6 @@ const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject): } } - if (isSessionIdUsingChatMessageId) obj.isSessionIdUsingChatMessageId = true - const motorheadMemory = new MotorheadMemoryExtended(obj) // Get messages from sessionId @@ -139,7 +126,24 @@ class MotorheadMemoryExtended extends MotorheadMemory { this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId } - async clear(): Promise { + async loadMemoryVariables(values: InputValues, overrideSessionId = ''): Promise { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } + return super.loadMemoryVariables({ values }) + } + + async saveContext(inputValues: InputValues, outputValues: OutputValues, overrideSessionId = ''): Promise { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } + return super.saveContext(inputValues, outputValues) + } + + async clear(overrideSessionId = ''): Promise { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } try { await this.caller.call(fetch, `${this.url}/sessions/${this.sessionId}/memory`, { //@ts-ignore @@ -155,6 +159,28 @@ class MotorheadMemoryExtended extends MotorheadMemory { await this.chatHistory.clear() await super.clear() } + + async getChatMessages(overrideSessionId = ''): Promise { + const id = overrideSessionId ?? this.sessionId + const memoryVariables = await this.loadMemoryVariables({}, id) + const baseMessages = memoryVariables[this.memoryKey] + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { + const id = overrideSessionId ?? this.sessionId + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + const inputValues = { [this.inputKey ?? 'input']: input?.text } + const outputValues = { output: output?.text } + + await this.saveContext(inputValues, outputValues, id) + } + + async clearChatMessages(overrideSessionId = ''): Promise { + const id = overrideSessionId ?? 
this.sessionId + await this.clear(id) + } } module.exports = { nodeClass: MotorMemory_Memory } diff --git a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts index 7fe447ad5..4692088b9 100644 --- a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts +++ b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts @@ -1,9 +1,9 @@ -import { INode, INodeData, INodeParams, ICommonObject } from '../../../src/Interface' -import { getBaseClasses, getCredentialData, getCredentialParam, serializeChatHistory } from '../../../src/utils' +import { Redis } from 'ioredis' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' import { RedisChatMessageHistory, RedisChatMessageHistoryInput } from 'langchain/stores/message/ioredis' -import { mapStoredMessageToChatMessage, BaseMessage } from 'langchain/schema' -import { Redis } from 'ioredis' +import { mapStoredMessageToChatMessage, BaseMessage, AIMessage, HumanMessage } from 'langchain/schema' +import { INode, INodeData, INodeParams, ICommonObject, MessageType, IMessage } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' class RedisBackedChatMemory_Memory implements INode { label: string @@ -38,7 +38,8 @@ class RedisBackedChatMemory_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -64,40 +65,28 @@ class RedisBackedChatMemory_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return await initalizeRedis(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const redis = await initalizeRedis(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Redis memory session ${sessionId ? sessionId : chatId}`) - await redis.clear() - options.logger.info(`Successfully cleared Redis memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const redis = await initalizeRedis(nodeData, options) - const key = memoryKey ?? 'chat_history' - const memoryResult = await redis.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Promise => { - const sessionId = nodeData.inputs?.sessionId as string const sessionTTL = nodeData.inputs?.sessionTTL as number const memoryKey = nodeData.inputs?.memoryKey as string const chatId = options?.chatId as string let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) const redisUrl = getCredentialParam('redisUrl', credentialData, nodeData) let client: Redis + if (!redisUrl || redisUrl === '') { const username = getCredentialParam('redisCacheUser', credentialData, nodeData) const password = getCredentialParam('redisCachePwd', credentialData, nodeData) @@ -115,7 +104,7 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom } let obj: RedisChatMessageHistoryInput = { - sessionId: sessionId ? sessionId : chatId, + sessionId, client } @@ -128,6 +117,7 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom const redisChatMessageHistory = new RedisChatMessageHistory(obj) + /**** Methods below are needed to override the original implementations ****/ redisChatMessageHistory.getMessages = async (): Promise => { const rawStoredMessages = await client.lrange((redisChatMessageHistory as any).sessionId, 0, -1) const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message)) @@ -145,25 +135,73 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom redisChatMessageHistory.clear = async (): Promise => { await client.del((redisChatMessageHistory as any).sessionId) } + /**** End of override functions ****/ const memory = new BufferMemoryExtended({ memoryKey: memoryKey ?? 'chat_history', chatHistory: redisChatMessageHistory, - isSessionIdUsingChatMessageId + isSessionIdUsingChatMessageId, + sessionId, + redisClient: client }) + return memory } interface BufferMemoryExtendedInput { isSessionIdUsingChatMessageId: boolean + redisClient: Redis + sessionId: string } class BufferMemoryExtended extends BufferMemory { - isSessionIdUsingChatMessageId? = false + isSessionIdUsingChatMessageId = false + sessionId = '' + redisClient: Redis - constructor(fields: BufferMemoryInput & Partial) { + constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId + this.sessionId = fields.sessionId + this.redisClient = fields.redisClient + } + + async getChatMessages(overrideSessionId = ''): Promise { + if (!this.redisClient) return [] + + const id = overrideSessionId ?? this.sessionId + const rawStoredMessages = await this.redisClient.lrange(id, 0, -1) + const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message)) + const baseMessages = orderedMessages.map(mapStoredMessageToChatMessage) + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { + if (!this.redisClient) return + + const id = overrideSessionId ?? this.sessionId + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + if (input) { + const newInputMessage = new HumanMessage(input.text) + const messageToAdd = [newInputMessage].map((msg) => msg.toDict()) + await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0])) + } + + if (output) { + const newOutputMessage = new AIMessage(output.text) + const messageToAdd = [newOutputMessage].map((msg) => msg.toDict()) + await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0])) + } + } + + async clearChatMessages(overrideSessionId = ''): Promise { + if (!this.redisClient) return + + const id = overrideSessionId ?? 
this.sessionId + await this.redisClient.del(id) + await this.clear() } } diff --git a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts index 8bca04404..327f0f329 100644 --- a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts +++ b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts @@ -1,8 +1,10 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses, getCredentialData, getCredentialParam, serializeChatHistory } from '../../../src/utils' -import { ICommonObject } from '../../../src' +import { Redis } from '@upstash/redis' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' import { UpstashRedisChatMessageHistory } from 'langchain/stores/message/upstash_redis' +import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, StoredMessage } from 'langchain/schema' +import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { ICommonObject } from '../../../src/Interface' class UpstashRedisBackedChatMemory_Memory implements INode { label: string @@ -43,7 +45,8 @@ class UpstashRedisBackedChatMemory_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -62,51 +65,43 @@ class UpstashRedisBackedChatMemory_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initalizeUpstashRedis(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const redis = await initalizeUpstashRedis(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Upstash Redis memory session ${sessionId ? sessionId : chatId}`) - await redis.clear() - options.logger.info(`Successfully cleared Upstash Redis memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const redis = await initalizeUpstashRedis(nodeData, options) - const key = 'chat_history' - const memoryResult = await redis.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject): Promise => { const baseURL = nodeData.inputs?.baseURL as string - const sessionId = nodeData.inputs?.sessionId as string const sessionTTL = nodeData.inputs?.sessionTTL as string const chatId = options?.chatId as string let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) const upstashRestToken = getCredentialParam('upstashRestToken', credentialData, nodeData) + const client = new Redis({ + url: baseURL, + token: upstashRestToken + }) + const redisChatMessageHistory = new UpstashRedisChatMessageHistory({ sessionId: sessionId ? sessionId : chatId, sessionTTL: sessionTTL ? parseInt(sessionTTL, 10) : undefined, - config: { - url: baseURL, - token: upstashRestToken - } + client }) const memory = new BufferMemoryExtended({ memoryKey: 'chat_history', chatHistory: redisChatMessageHistory, - isSessionIdUsingChatMessageId + isSessionIdUsingChatMessageId, + sessionId, + redisClient: client }) return memory @@ -114,14 +109,59 @@ const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject interface BufferMemoryExtendedInput { isSessionIdUsingChatMessageId: boolean + redisClient: Redis + sessionId: string } class BufferMemoryExtended extends BufferMemory { - isSessionIdUsingChatMessageId? = false + isSessionIdUsingChatMessageId = false + sessionId = '' + redisClient: Redis - constructor(fields: BufferMemoryInput & Partial) { + constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId + this.sessionId = fields.sessionId + this.redisClient = fields.redisClient + } + + async getChatMessages(overrideSessionId = ''): Promise { + if (!this.redisClient) return [] + + const id = overrideSessionId ?? this.sessionId + const rawStoredMessages: StoredMessage[] = await this.redisClient.lrange(id, 0, -1) + const orderedMessages = rawStoredMessages.reverse() + const previousMessages = orderedMessages.filter((x): x is StoredMessage => x.type !== undefined && x.data.content !== undefined) + const baseMessages = previousMessages.map(mapStoredMessageToChatMessage) + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { + if (!this.redisClient) return + + const id = overrideSessionId ?? this.sessionId + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + if (input) { + const newInputMessage = new HumanMessage(input.text) + const messageToAdd = [newInputMessage].map((msg) => msg.toDict()) + await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0])) + } + + if (output) { + const newOutputMessage = new AIMessage(output.text) + const messageToAdd = [newOutputMessage].map((msg) => msg.toDict()) + await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0])) + } + } + + async clearChatMessages(overrideSessionId = ''): Promise { + if (!this.redisClient) return + + const id = overrideSessionId ?? 
this.sessionId + await this.redisClient.del(id) + await this.clear() } } diff --git a/packages/components/nodes/memory/ZepMemory/ZepMemory.ts b/packages/components/nodes/memory/ZepMemory/ZepMemory.ts index ced871a1e..ac0ac8964 100644 --- a/packages/components/nodes/memory/ZepMemory/ZepMemory.ts +++ b/packages/components/nodes/memory/ZepMemory/ZepMemory.ts @@ -1,9 +1,8 @@ -import { SystemMessage } from 'langchain/schema' -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { ZepMemory, ZepMemoryInput } from 'langchain/memory/zep' import { ICommonObject } from '../../../src' -import { getBufferString } from 'langchain/memory' +import { InputValues, MemoryVariables, OutputValues } from 'langchain/memory' class ZepMemory_Memory implements INode { label: string @@ -20,7 +19,7 @@ class ZepMemory_Memory implements INode { constructor() { this.label = 'Zep Memory' this.name = 'ZepMemory' - this.version = 1.0 + this.version = 2.0 this.type = 'ZepMemory' this.icon = 'zep.png' this.category = 'Memory' @@ -41,17 +40,12 @@ class ZepMemory_Memory implements INode { type: 'string', default: 'http://127.0.0.1:8000' }, - { - label: 'Auto Summary', - name: 'autoSummary', - type: 'boolean', - default: true - }, { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -61,13 +55,7 @@ class ZepMemory_Memory implements INode { name: 'k', type: 'number', default: '10', - description: 'Window of size k to surface the last k back-and-forth to use as memory.' - }, - { - label: 'Auto Summary Template', - name: 'autoSummaryTemplate', - type: 'string', - default: 'This is the summary of the following conversation:\n{summary}', + description: 'Window of size k to surface the last k back-and-forth to use as memory.', additionalParams: true }, { @@ -109,57 +97,7 @@ class ZepMemory_Memory implements INode { } async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { - const autoSummaryTemplate = nodeData.inputs?.autoSummaryTemplate as string - const autoSummary = nodeData.inputs?.autoSummary as boolean - - const k = nodeData.inputs?.k as string - - let zep = await initalizeZep(nodeData, options) - - // hack to support summary - let tmpFunc = zep.loadMemoryVariables - zep.loadMemoryVariables = async (values) => { - let data = await tmpFunc.bind(zep, values)() - if (autoSummary && zep.returnMessages && data[zep.memoryKey] && data[zep.memoryKey].length) { - const zepClient = await zep.zepClientPromise - const memory = await zepClient.memory.getMemory(zep.sessionId, parseInt(k, 10) ?? 
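A note on the Upstash message layout above: addChatMessages LPUSHes each turn as a JSON-serialized StoredMessage, so the newest entry sits at index 0, and getChatMessages reverses the LRANGE result to restore chronological order. A minimal round-trip sketch, assuming only the @upstash/redis and langchain APIs already imported in that file ('demo-session' and the env var names are placeholders):

import { Redis } from '@upstash/redis'
import { mapStoredMessageToChatMessage, StoredMessage } from 'langchain/schema'

const redis = new Redis({ url: process.env.UPSTASH_REDIS_REST_URL!, token: process.env.UPSTASH_REDIS_REST_TOKEN! })

// Push two turns the way BufferMemoryExtended does: the list becomes [ai, human]
await redis.lpush('demo-session', JSON.stringify({ type: 'human', data: { content: 'hi', additional_kwargs: {} } }))
await redis.lpush('demo-session', JSON.stringify({ type: 'ai', data: { content: 'hello!', additional_kwargs: {} } }))

// Read back newest-first, then reverse to chronological order, as getChatMessages does
// (@upstash/redis deserializes stored JSON automatically, hence the typed lrange)
const raw = await redis.lrange<StoredMessage>('demo-session', 0, -1)
const chronological = raw.reverse().map(mapStoredMessageToChatMessage)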
10) - if (memory?.summary) { - let summary = autoSummaryTemplate.replace(/{summary}/g, memory.summary.content) - // eslint-disable-next-line no-console - console.log('[ZepMemory] auto summary:', summary) - data[zep.memoryKey].unshift(new SystemMessage(summary)) - } - } - // for langchain zep memory compatibility, or we will get "Missing value for input variable chat_history" - if (data instanceof Array) { - data = { - [zep.memoryKey]: data - } - } - return data - } - return zep - } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const zep = await initalizeZep(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Zep memory session ${sessionId ? sessionId : chatId}`) - await zep.clear() - options.logger.info(`Successfully cleared Zep memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const aiPrefix = nodeData.inputs?.aiPrefix as string - const humanPrefix = nodeData.inputs?.humanPrefix as string - const zep = await initalizeZep(nodeData, options) - const key = memoryKey ?? 'chat_history' - const memoryResult = await zep.loadMemoryVariables({}) - return getBufferString(memoryResult[key], humanPrefix, aiPrefix) - } + return await initalizeZep(nodeData, options) } } @@ -169,40 +107,94 @@ const initalizeZep = async (nodeData: INodeData, options: ICommonObject): Promis const humanPrefix = nodeData.inputs?.humanPrefix as string const memoryKey = nodeData.inputs?.memoryKey as string const inputKey = nodeData.inputs?.inputKey as string - const sessionId = nodeData.inputs?.sessionId as string + const k = nodeData.inputs?.k as string const chatId = options?.chatId as string let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? '', options) const apiKey = getCredentialParam('apiKey', credentialData, nodeData) - const obj: ZepMemoryInput & Partial = { + const obj: ZepMemoryInput & ZepMemoryExtendedInput = { baseURL, - sessionId: sessionId ? sessionId : chatId, aiPrefix, humanPrefix, returnMessages: true, memoryKey, - inputKey + inputKey, + sessionId, + isSessionIdUsingChatMessageId, + k: k ? parseInt(k, 10) : undefined } if (apiKey) obj.apiKey = apiKey - if (isSessionIdUsingChatMessageId) obj.isSessionIdUsingChatMessageId = true return new ZepMemoryExtended(obj) } interface ZepMemoryExtendedInput { isSessionIdUsingChatMessageId: boolean + k?: number } class ZepMemoryExtended extends ZepMemory { - isSessionIdUsingChatMessageId? 
= false + isSessionIdUsingChatMessageId = false + lastN?: number - constructor(fields: ZepMemoryInput & Partial) { + constructor(fields: ZepMemoryInput & ZepMemoryExtendedInput) { super(fields) this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId + this.lastN = fields.k + } + + async loadMemoryVariables(values: InputValues, overrideSessionId = ''): Promise<MemoryVariables> { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } + return super.loadMemoryVariables({ ...values, lastN: this.lastN }) + } + + async saveContext(inputValues: InputValues, outputValues: OutputValues, overrideSessionId = ''): Promise<void> { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } + return super.saveContext(inputValues, outputValues) + } + + async clear(overrideSessionId = ''): Promise<void> { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } + return super.clear() + } + + async getChatMessages(overrideSessionId = ''): Promise<IMessage[]> { + const id = overrideSessionId ? overrideSessionId : this.sessionId + const memoryVariables = await this.loadMemoryVariables({}, id) + const baseMessages = memoryVariables[this.memoryKey] + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> { + const id = overrideSessionId ? overrideSessionId : this.sessionId + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + const inputValues = { [this.inputKey ?? 'input']: input?.text } + const outputValues = { output: output?.text } + + await this.saveContext(inputValues, outputValues, id) + } + + async clearChatMessages(overrideSessionId = ''): Promise<void> { + const id = overrideSessionId ? overrideSessionId : this.sessionId + await this.clear(id) + } } diff --git a/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts b/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts new file mode 100644 index 000000000..db998e1f0 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts @@ -0,0 +1,75 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class CompactRefine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Compact and Refine' + this.name = 'compactrefineLlamaIndex' + this.version = 1.0 + this.type = 'CompactRefine' + this.icon = 'compactrefine.svg' + this.category = 'Response Synthesizer' + this.description = + 'CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Refine Prompt', + name: 'refinePrompt', + type: 'string', + rows: 4, + default: `The original query is as follows: {query} +We have provided an existing answer: {existingAnswer} +We have the opportunity to refine the existing answer (only if needed) with some more context below. +------------ +{context} +------------ +Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.
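One behavioral note on ZepMemoryExtended above: the override methods assign super.sessionId without restoring it afterwards, so an override is sticky. A short illustration (hypothetical session ids; `memory` is the instance returned by initalizeZep):

await memory.getChatMessages('session-b') // reads session-b and leaves sessionId set to 'session-b'
await memory.getChatMessages() // no override given, but this still reads session-b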
+Refined Answer:`, + warning: `Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`, + optional: true + }, + { + label: 'Text QA Prompt', + name: 'textQAPrompt', + type: 'string', + rows: 4, + default: `Context information is below. +--------------------- +{context} +--------------------- +Given the context information and not prior knowledge, answer the query. +Query: {query} +Answer:`, + warning: `Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise<any> { + const refinePrompt = nodeData.inputs?.refinePrompt as string + const textQAPrompt = nodeData.inputs?.textQAPrompt as string + + const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) => + refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query) + const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'CompactAndRefine' }) + } +} + +module.exports = { nodeClass: CompactRefine_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg b/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg new file mode 100644 index 000000000..9ea95529b --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/Refine/Refine.ts b/packages/components/nodes/responsesynthesizer/Refine/Refine.ts new file mode 100644 index 000000000..267bc2082 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/Refine/Refine.ts @@ -0,0 +1,75 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class Refine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Refine' + this.name = 'refineLlamaIndex' + this.version = 1.0 + this.type = 'Refine' + this.icon = 'refine.svg' + this.category = 'Response Synthesizer' + this.description = + 'Create and refine an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Refine Prompt', + name: 'refinePrompt', + type: 'string', + rows: 4, + default: `The original query is as follows: {query} +We have provided an existing answer: {existingAnswer} +We have the opportunity to refine the existing answer (only if needed) with some more context below. +------------ +{context} +------------ +Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer. +Refined Answer:`, + warning: `Prompt can contain no variables, or up to 3 variables.
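The prompt templates above are plain string-substitution closures. One caveat worth knowing: String.prototype.replace with a string pattern substitutes only the first occurrence, which is fine for the defaults (each placeholder appears exactly once) but means a custom prompt that repeats a placeholder only gets its first instance filled. An illustrative call, reusing the closure shape from init above:

const textQAPromptTemplate = ({ context = '', query = '' }) =>
    'Context:\n{context}\nQuery: {query}\nAnswer:'.replace('{context}', context).replace('{query}', query)

textQAPromptTemplate({ context: 'The author is Ada.', query: 'Who wrote it?' })
// -> 'Context:\nThe author is Ada.\nQuery: Who wrote it?\nAnswer:'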
Variables must be {existingAnswer}, {context} and {query}`, + optional: true + }, + { + label: 'Text QA Prompt', + name: 'textQAPrompt', + type: 'string', + rows: 4, + default: `Context information is below. +--------------------- +{context} +--------------------- +Given the context information and not prior knowledge, answer the query. +Query: {query} +Answer:`, + warning: `Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise<any> { + const refinePrompt = nodeData.inputs?.refinePrompt as string + const textQAPrompt = nodeData.inputs?.textQAPrompt as string + + const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) => + refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query) + const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'Refine' }) + } +} + +module.exports = { nodeClass: Refine_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/Refine/refine.svg b/packages/components/nodes/responsesynthesizer/Refine/refine.svg new file mode 100644 index 000000000..1170c5848 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/Refine/refine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts new file mode 100644 index 000000000..cb8800206 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts @@ -0,0 +1,35 @@ +import { INode, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class SimpleResponseBuilder_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Simple Response Builder' + this.name = 'simpleResponseBuilderLlamaIndex' + this.version = 1.0 + this.type = 'SimpleResponseBuilder' + this.icon = 'simplerb.svg' + this.category = 'Response Synthesizer' + this.description = `Apply a query to a collection of text chunks, gather the responses into an array, and return a combined string of all responses.
Useful for individual queries on each text chunk.` + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [] + } + + async init(): Promise<any> { + return new ResponseSynthesizerClass({ type: 'SimpleResponseBuilder' }) + } +} + +module.exports = { nodeClass: SimpleResponseBuilder_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg new file mode 100644 index 000000000..6f04fdc9b --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts b/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts new file mode 100644 index 000000000..448727869 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts @@ -0,0 +1,56 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class TreeSummarize_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'TreeSummarize' + this.name = 'treeSummarizeLlamaIndex' + this.version = 1.0 + this.type = 'TreeSummarize' + this.icon = 'treesummarize.svg' + this.category = 'Response Synthesizer' + this.description = + 'Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Prompt', + name: 'prompt', + type: 'string', + rows: 4, + default: `Context information from multiple sources is below. +--------------------- +{context} +--------------------- +Given the information from multiple sources and not prior knowledge, answer the query. +Query: {query} +Answer:`, + warning: `Prompt can contain no variables, or up to 2 variables.
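For intuition about the recursion described above, here is a generic tree-summarize sketch — illustrative only, not the llamaindex internals; `llm` stands in for a single completion call and `prompt` is the node's default prompt:

async function treeSummarize(prompt: string, chunks: string[], query: string, llm: (p: string) => Promise<string>): Promise<string> {
    if (chunks.length <= 1) return chunks[0] ?? ''
    const next: string[] = []
    for (let i = 0; i < chunks.length; i += 2) {
        // fill the prompt with a pair of chunks and collapse them into one summary
        const context = chunks.slice(i, i + 2).join('\n')
        next.push(await llm(prompt.replace('{context}', context).replace('{query}', query)))
    }
    return treeSummarize(prompt, next, query, llm) // recurse toward the root node
}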
Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const prompt = nodeData.inputs?.prompt as string + + const textQAPromptTemplate = ({ context = '', query = '' }) => prompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, type: 'TreeSummarize' }) + } +} + +module.exports = { nodeClass: TreeSummarize_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg b/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg new file mode 100644 index 000000000..f81a3a533 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/base.ts b/packages/components/nodes/responsesynthesizer/base.ts new file mode 100644 index 000000000..68fd7f1ab --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/base.ts @@ -0,0 +1,11 @@ +export class ResponseSynthesizerClass { + type: string + textQAPromptTemplate?: any + refinePromptTemplate?: any + + constructor(params: { type: string; textQAPromptTemplate?: any; refinePromptTemplate?: any }) { + this.type = params.type + this.textQAPromptTemplate = params.textQAPromptTemplate + this.refinePromptTemplate = params.refinePromptTemplate + } +} diff --git a/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts new file mode 100644 index 000000000..683e6f25f --- /dev/null +++ b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts @@ -0,0 +1,366 @@ +import { + BaseNode, + Document, + Metadata, + VectorStore, + VectorStoreQuery, + VectorStoreQueryResult, + serviceContextFromDefaults, + storageContextFromDefaults, + VectorStoreIndex, + BaseEmbedding +} from 'llamaindex' +import { FetchResponse, Index, Pinecone, ScoredPineconeRecord } from '@pinecone-database/pinecone' +import { flatten } from 'lodash' +import { Document as LCDocument } from 'langchain/document' +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { flattenObject, getCredentialData, getCredentialParam } from '../../../src/utils' + +class PineconeLlamaIndex_VectorStores implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + tags: string[] + baseClasses: string[] + inputs: INodeParams[] + credential: INodeParams + + constructor() { + this.label = 'Pinecone' + this.name = 'pineconeLlamaIndex' + this.version = 1.0 + this.type = 'Pinecone' + this.icon = 'pinecone.png' + this.category = 'Vector Stores' + this.description = `Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database` + this.baseClasses = [this.type, 'VectorIndexRetriever'] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['pineconeApi'] + } + this.inputs = [ + { + label: 'Document', + name: 'document', + type: 'Document', + list: true, + optional: true + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'BaseEmbedding_LlamaIndex' + }, + { + label: 'Pinecone Index', + name: 'pineconeIndex', + type: 'string' + }, + 
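ResponseSynthesizerClass above is deliberately thin: a typed carrier for the synthesizer type plus the optional template closures, which the engine nodes downstream map onto the matching llamaindex synthesizer. A usage sketch (assumed wiring; note the closure already has the record-in, string-out shape llamaindex prompts use):

import { ResponseSynthesizerClass } from './base' // path as in the nodes above

const rs = new ResponseSynthesizerClass({
    type: 'TreeSummarize',
    textQAPromptTemplate: ({ context = '', query = '' }) => `Context: ${context}\nQuery: ${query}\nAnswer:`
})
// rs.type tells the consuming engine which llamaindex synthesizer to construct;
// rs.textQAPromptTemplate can be handed to it unchanged.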
{ + label: 'Pinecone Namespace', + name: 'pineconeNamespace', + type: 'string', + placeholder: 'my-first-namespace', + additionalParams: true, + optional: true + }, + { + label: 'Pinecone Metadata Filter', + name: 'pineconeMetadataFilter', + type: 'json', + optional: true, + additionalParams: true + }, + { + label: 'Top K', + name: 'topK', + description: 'Number of top results to fetch. Default to 4', + placeholder: '4', + type: 'number', + additionalParams: true, + optional: true + } + ] + } + + //@ts-ignore + vectorStoreMethods = { + async upsert(nodeData: INodeData, options: ICommonObject): Promise { + const indexName = nodeData.inputs?.pineconeIndex as string + const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string + const docs = nodeData.inputs?.document as LCDocument[] + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData) + const pineconeEnv = getCredentialParam('pineconeEnv', credentialData, nodeData) + + const pcvs = new PineconeVectorStore({ + indexName, + apiKey: pineconeApiKey, + environment: pineconeEnv, + namespace: pineconeNamespace + }) + + const flattenDocs = docs && docs.length ? flatten(docs) : [] + const finalDocs = [] + for (let i = 0; i < flattenDocs.length; i += 1) { + if (flattenDocs[i] && flattenDocs[i].pageContent) { + finalDocs.push(new LCDocument(flattenDocs[i])) + } + } + + const llamadocs: Document[] = [] + for (const doc of finalDocs) { + llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata })) + } + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ vectorStore: pcvs }) + + try { + await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext }) + } catch (e) { + throw new Error(e) + } + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const indexName = nodeData.inputs?.pineconeIndex as string + const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string + const pineconeMetadataFilter = nodeData.inputs?.pineconeMetadataFilter + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + const topK = nodeData.inputs?.topK as string + const k = topK ? parseFloat(topK) : 4 + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData) + const pineconeEnv = getCredentialParam('pineconeEnv', credentialData, nodeData) + + const obj: PineconeParams = { + indexName, + apiKey: pineconeApiKey, + environment: pineconeEnv + } + + if (pineconeNamespace) obj.namespace = pineconeNamespace + if (pineconeMetadataFilter) { + const metadatafilter = typeof pineconeMetadataFilter === 'object' ? 
pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter) + obj.queryFilter = metadatafilter + } + + const pcvs = new PineconeVectorStore(obj) + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ vectorStore: pcvs }) + + const index = await VectorStoreIndex.init({ + nodes: [], + storageContext, + serviceContext + }) + + const retriever = index.asRetriever() + retriever.similarityTopK = k + ;(retriever as any).serviceContext = serviceContext + + return retriever + } +} + +type PineconeParams = { + indexName: string + apiKey: string + environment: string + namespace?: string + chunkSize?: number + queryFilter?: object +} + +class PineconeVectorStore implements VectorStore { + storesText: boolean = true + db?: Pinecone + indexName: string + apiKey: string + environment: string + chunkSize: number + namespace?: string + queryFilter?: object + + constructor(params: PineconeParams) { + this.indexName = params?.indexName + this.apiKey = params?.apiKey + this.environment = params?.environment + this.namespace = params?.namespace ?? '' + this.chunkSize = params?.chunkSize ?? Number.parseInt(process.env.PINECONE_CHUNK_SIZE ?? '100') + this.queryFilter = params?.queryFilter ?? {} + } + + private async getDb(): Promise<Pinecone> { + if (!this.db) { + this.db = new Pinecone({ + apiKey: this.apiKey, + environment: this.environment + }) + } + return Promise.resolve(this.db) + } + + client() { + return this.getDb() + } + + async index() { + const db: Pinecone = await this.getDb() + return db.Index(this.indexName) + } + + async clearIndex() { + const db: Pinecone = await this.getDb() + return await db.index(this.indexName).deleteAll() + } + + async add(embeddingResults: BaseNode[]): Promise<string[]> { + if (embeddingResults.length == 0) { + return Promise.resolve([]) + } + + const idx: Index = await this.index() + const nodes = embeddingResults.map(this.nodeToRecord) + + for (let i = 0; i < nodes.length; i += this.chunkSize) { + const chunk = nodes.slice(i, i + this.chunkSize) + const result = await this.saveChunk(idx, chunk) + if (!result) { + return Promise.reject() + } + } + return Promise.resolve([]) + } + + protected async saveChunk(idx: Index, chunk: any) { + try { + const namespace = idx.namespace(this.namespace ?? '') + await namespace.upsert(chunk) + return true + } catch (err) { + return false + } + } + + async delete(refDocId: string): Promise<void> { + const idx = await this.index() + const namespace = idx.namespace(this.namespace ?? '') + return namespace.deleteOne(refDocId) + } + + async query(query: VectorStoreQuery): Promise<VectorStoreQueryResult> { + const queryOptions: any = { + vector: query.queryEmbedding, + topK: query.similarityTopK, + filter: this.queryFilter + } + + const idx = await this.index() + const namespace = idx.namespace(this.namespace ?? '') + const results = await namespace.query(queryOptions) + + const idList = results.matches.map((row) => row.id) + const records: FetchResponse = await namespace.fetch(idList) + const rows = Object.values(records.records) + + const nodes = rows.map((row) => { + return new Document({ + id_: row.id, + text: this.textFromResultRow(row), + metadata: this.metaWithoutText(row.metadata), + embedding: row.values + }) + }) + + const result = { + nodes: nodes, + similarities: results.matches.map((row) => row.score || 999), + ids: results.matches.map((row) => row.id) + } + + return Promise.resolve(result) + } + + /** + * Required by VectorStore interface. Currently ignored.
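+ * A no-op is reasonable here: Pinecone persists upserts server-side as they happen, + * so the local storage context has nothing to flush.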
+ */ + persist(): Promise { + return Promise.resolve() + } + + textFromResultRow(row: ScoredPineconeRecord): string { + return row.metadata?.text ?? '' + } + + metaWithoutText(meta: Metadata): any { + return Object.keys(meta) + .filter((key) => key != 'text') + .reduce((acc: any, key: string) => { + acc[key] = meta[key] + return acc + }, {}) + } + + nodeToRecord(node: BaseNode) { + let id: any = node.id_.length ? node.id_ : null + return { + id: id, + values: node.getEmbedding(), + metadata: { + ...cleanupMetadata(node.metadata), + text: (node as any).text + } + } + } +} + +const cleanupMetadata = (nodeMetadata: ICommonObject) => { + // Pinecone doesn't support nested objects, so we flatten them + const documentMetadata: any = { ...nodeMetadata } + // preserve string arrays which are allowed + const stringArrays: Record = {} + for (const key of Object.keys(documentMetadata)) { + if (Array.isArray(documentMetadata[key]) && documentMetadata[key].every((el: any) => typeof el === 'string')) { + stringArrays[key] = documentMetadata[key] + delete documentMetadata[key] + } + } + const metadata: { + [key: string]: string | number | boolean | string[] | null + } = { + ...flattenObject(documentMetadata), + ...stringArrays + } + // Pinecone doesn't support null values, so we remove them + for (const key of Object.keys(metadata)) { + if (metadata[key] == null) { + delete metadata[key] + } else if (typeof metadata[key] === 'object' && Object.keys(metadata[key] as unknown as object).length === 0) { + delete metadata[key] + } + } + return metadata +} + +module.exports = { nodeClass: PineconeLlamaIndex_VectorStores } diff --git a/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts new file mode 100644 index 000000000..eeef6f693 --- /dev/null +++ b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts @@ -0,0 +1,124 @@ +import path from 'path' +import { flatten } from 'lodash' +import { storageContextFromDefaults, serviceContextFromDefaults, VectorStoreIndex, Document } from 'llamaindex' +import { Document as LCDocument } from 'langchain/document' +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { getUserHome } from '../../../src' + +class SimpleStoreUpsert_LlamaIndex_VectorStores implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'SimpleStore' + this.name = 'simpleStoreLlamaIndex' + this.version = 1.0 + this.type = 'SimpleVectorStore' + this.icon = 'simplevs.svg' + this.category = 'Vector Stores' + this.description = 'Upsert embedded data to local path and perform similarity search' + this.baseClasses = [this.type, 'VectorIndexRetriever'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Document', + name: 'document', + type: 'Document', + list: true, + optional: true + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'BaseEmbedding_LlamaIndex' + }, + { + label: 'Base Path to store', + name: 'basePath', + description: + 'Path to store persist embeddings indexes with persistence. 
If not specified, default to same path where database is stored', + type: 'string', + optional: true + }, + { + label: 'Top K', + name: 'topK', + description: 'Number of top results to fetch. Default to 4', + placeholder: '4', + type: 'number', + optional: true + } + ] + } + + //@ts-ignore + vectorStoreMethods = { + async upsert(nodeData: INodeData): Promise { + const basePath = nodeData.inputs?.basePath as string + const docs = nodeData.inputs?.document as LCDocument[] + const embeddings = nodeData.inputs?.embeddings + const model = nodeData.inputs?.model + + let filePath = '' + if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex') + else filePath = basePath + + const flattenDocs = docs && docs.length ? flatten(docs) : [] + const finalDocs = [] + for (let i = 0; i < flattenDocs.length; i += 1) { + finalDocs.push(new LCDocument(flattenDocs[i])) + } + + const llamadocs: Document[] = [] + for (const doc of finalDocs) { + llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata })) + } + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ persistDir: filePath }) + + try { + await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext }) + } catch (e) { + throw new Error(e) + } + } + } + + async init(nodeData: INodeData): Promise { + const basePath = nodeData.inputs?.basePath as string + const embeddings = nodeData.inputs?.embeddings + const model = nodeData.inputs?.model + const topK = nodeData.inputs?.topK as string + const k = topK ? parseFloat(topK) : 4 + + let filePath = '' + if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex') + else filePath = basePath + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ persistDir: filePath }) + + const index = await VectorStoreIndex.init({ storageContext, serviceContext }) + const retriever = index.asRetriever() + retriever.similarityTopK = k + + return retriever + } +} + +module.exports = { nodeClass: SimpleStoreUpsert_LlamaIndex_VectorStores } diff --git a/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg b/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg new file mode 100644 index 000000000..52c74432b --- /dev/null +++ b/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/packages/components/package.json b/packages/components/package.json index bea9a7a06..d1485b37e 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -21,7 +21,7 @@ "@aws-sdk/client-s3": "^3.427.0", "@dqbd/tiktoken": "^1.0.7", "@elastic/elasticsearch": "^8.9.0", - "@getzep/zep-js": "^0.6.3", + "@getzep/zep-js": "^0.9.0", "@gomomento/sdk": "^1.51.1", "@gomomento/sdk-core": "^1.51.1", "@google-ai/generativelanguage": "^0.2.1", @@ -54,6 +54,7 @@ "langfuse-langchain": "^1.0.31", "langsmith": "^0.0.32", "linkifyjs": "^4.1.1", + "llamaindex": "^0.0.30", "llmonitor": "^0.5.5", "mammoth": "^1.5.1", "moment": "^2.29.3", diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts index 6752f9440..53d72cb45 100644 --- a/packages/components/src/Interface.ts +++ b/packages/components/src/Interface.ts @@ -91,6 +91,7 @@ export interface INodeProperties { version: number category: string baseClasses: string[] + tags?: string[] description?: 
string filePath?: string badge?: string @@ -107,10 +108,6 @@ export interface INode extends INodeProperties { search: (nodeData: INodeData, options?: ICommonObject) => Promise delete: (nodeData: INodeData, options?: ICommonObject) => Promise } - memoryMethods?: { - clearSessionMemory: (nodeData: INodeData, options?: ICommonObject) => Promise - getChatMessages: (nodeData: INodeData, options?: ICommonObject) => Promise - } init?(nodeData: INodeData, input: string, options?: ICommonObject): Promise run?(nodeData: INodeData, input: string, options?: ICommonObject): Promise } diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts index 404f7c75d..ceeb402a4 100644 --- a/packages/components/src/utils.ts +++ b/packages/components/src/utils.ts @@ -8,7 +8,7 @@ import { DataSource } from 'typeorm' import { ICommonObject, IDatabaseEntity, IMessage, INodeData } from './Interface' import { AES, enc } from 'crypto-js' import { ChatMessageHistory } from 'langchain/memory' -import { AIMessage, HumanMessage } from 'langchain/schema' +import { AIMessage, HumanMessage, BaseMessage } from 'langchain/schema' export const numberOrExpressionRegex = '^(\\d+\\.?\\d*|{{.*}})$' //return true if string consists only numbers OR expression {{}} export const notEmptyRegex = '(.|\\s)*\\S(.|\\s)*' //return true if string is not empty or blank @@ -587,3 +587,54 @@ export const convertSchemaToZod = (schema: string | object): ICommonObject => { throw new Error(e) } } + +/** + * Flatten nested object + * @param {ICommonObject} obj + * @param {string} parentKey + * @returns {ICommonObject} + */ +export const flattenObject = (obj: ICommonObject, parentKey?: string) => { + let result: any = {} + + Object.keys(obj).forEach((key) => { + const value = obj[key] + const _key = parentKey ? parentKey + '.' 
+ key : key + if (value && typeof value === 'object') { + result = { ...result, ...flattenObject(value, _key) } + } else { + result[_key] = value + } + }) + + return result +} + +/** + * Convert BaseMessage to IMessage + * @param {BaseMessage[]} messages + * @returns {IMessage[]} + */ +export const convertBaseMessagetoIMessage = (messages: BaseMessage[]): IMessage[] => { + const formatmessages: IMessage[] = [] + for (const m of messages) { + if (m._getType() === 'human') { + formatmessages.push({ + message: m.content as string, + type: 'userMessage' + }) + } else if (m._getType() === 'ai') { + formatmessages.push({ + message: m.content as string, + type: 'apiMessage' + }) + } else if (m._getType() === 'system') { + formatmessages.push({ + message: m.content as string, + type: 'apiMessage' + }) + } + } + return formatmessages +} diff --git a/packages/server/marketplaces/chatflows/Context Chat Engine.json b/packages/server/marketplaces/chatflows/Context Chat Engine.json new file mode 100644 index 000000000..971aeea5a --- /dev/null +++ b/packages/server/marketplaces/chatflows/Context Chat Engine.json @@ -0,0 +1,855 @@ +{ + "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation using LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 438, + "id": "textFile_0", + "position": { + "x": 221.215421786192, + "y": 94.91489477412404 + }, + "type": "customNode", + "data": { + "id": "textFile_0", + "label": "Text File", + "version": 3, + "name": "textFile", + "type": "Document", + "baseClasses": ["Document"], + "category": "Document Loaders", + "description": "Load data from text files", + "inputParams": [ + { + "label": "Txt File", + "name": "txtFile", + "type": "file", + "fileType": ".txt, .html, .aspx, .asp, .cpp, .c, .cs, .css, .go, .h, .java, .js, .less, .ts, .php, .proto, .python, .py, .rst, .ruby, .rb, .rs, .scala, .sc, .scss, .sol, .sql, .swift, .markdown, .md, .tex, .ltx, .vb, .xml", + "id": "textFile_0-input-txtFile-file" + }, + { + "label": "Metadata", + "name": "metadata", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "textFile_0-input-metadata-json" + } + ], + "inputAnchors": [ + { + "label": "Text Splitter", + "name": "textSplitter", + "type": "TextSplitter", + "optional": true, + "id": "textFile_0-input-textSplitter-TextSplitter" + } + ], + "inputs": { + "textSplitter": "{{recursiveCharacterTextSplitter_0.data.instance}}", + "metadata": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "textFile_0-output-document-Document", + "name": "document", + "label": "Document", + "type": "Document" + }, + { + "id": "textFile_0-output-text-string|json", + "name": "text", + "label": "Text", + "type": "string | json" + } + ], + "default": "document" + } + ], + "outputs": { + "output": "document" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 221.215421786192, + "y": 94.91489477412404 + }, + "dragging": false + }, + { + "width": 300, + "height": 429, + "id": "recursiveCharacterTextSplitter_0", + "position": { + "x": -203.4868320229876, + "y": 101.32475976329766 + }, + "type": "customNode", + "data": { + "id": "recursiveCharacterTextSplitter_0", + "label": "Recursive Character Text Splitter", + "version": 2, + "name": "recursiveCharacterTextSplitter", + "type": "RecursiveCharacterTextSplitter", + "baseClasses": ["RecursiveCharacterTextSplitter", "TextSplitter",
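A worked example of how flattenObject above and cleanupMetadata in the Pinecone store cooperate (illustrative values):

flattenObject({ author: { name: 'ada' }, tags: ['x', 'y'], page: 3 })
// -> { 'author.name': 'ada', 'tags.0': 'x', 'tags.1': 'y', page: 3 }
// Arrays are 'object' to typeof, so they flatten into indexed keys as well. That is why
// cleanupMetadata pulls string[] values out *before* flattening and merges them back:
// Pinecone allows string arrays but not nested objects, and nulls are dropped at the end.

cleanupMetadata({ author: { name: 'ada' }, tags: ['x', 'y'], draft: null })
// -> { 'author.name': 'ada', tags: ['x', 'y'] }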
"BaseDocumentTransformer", "Runnable"], + "category": "Text Splitters", + "description": "Split documents recursively by different characters - starting with \"\\n\\n\", then \"\\n\", then \" \"", + "inputParams": [ + { + "label": "Chunk Size", + "name": "chunkSize", + "type": "number", + "default": 1000, + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-chunkSize-number" + }, + { + "label": "Chunk Overlap", + "name": "chunkOverlap", + "type": "number", + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-chunkOverlap-number" + }, + { + "label": "Custom Separators", + "name": "separators", + "type": "string", + "rows": 4, + "description": "Array of custom separators to determine when to split the text, will override the default separators", + "placeholder": "[\"|\", \"##\", \">\", \"-\"]", + "additionalParams": true, + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-separators-string" + } + ], + "inputAnchors": [], + "inputs": { + "chunkSize": 1000, + "chunkOverlap": "", + "separators": "" + }, + "outputAnchors": [ + { + "id": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable", + "name": "recursiveCharacterTextSplitter", + "label": "RecursiveCharacterTextSplitter", + "type": "RecursiveCharacterTextSplitter | TextSplitter | BaseDocumentTransformer | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -203.4868320229876, + "y": 101.32475976329766 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_0", + "position": { + "x": 176.27434578083106, + "y": 953.3664298122493 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_0", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding with LlamaIndex implementation", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 176.27434578083106, + "y": 953.3664298122493 + }, + "dragging": false + }, + { + "width": 300, + "height": 585, + "id": "pineconeLlamaIndex_0", + "position": { + "x": 609.3087433345761, + "y": 488.2141798951578 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_0", + "label": "Pinecone", + "version": 1, + "name": 
"pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": ["{{textFile_0.data.instance}}"], + "model": "{{chatOpenAI_LlamaIndex_1.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "", + "pineconeNamespace": "", + "pineconeMetadataFilter": "", + "topK": "" + }, + "outputAnchors": [ + { + "id": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "name": "pineconeLlamaIndex", + "label": "Pinecone", + "type": "Pinecone | VectorIndexRetriever" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 609.3087433345761, + "y": 488.2141798951578 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_1", + "position": { + "x": -195.15244974578656, + "y": 584.9467028201428 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_1", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM with LlamaIndex implementation", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": 
"gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -195.15244974578656, + "y": 584.9467028201428 + }, + "dragging": false + }, + { + "width": 300, + "height": 513, + "id": "contextChatEngine_0", + "position": { + "x": 1550.2553933740128, + "y": 270.7914631777829 + }, + "type": "customNode", + "data": { + "id": "contextChatEngine_0", + "label": "Context Chat Engine", + "version": 1, + "name": "contextChatEngine", + "type": "ContextChatEngine", + "baseClasses": ["ContextChatEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. 
Never break character.", + "id": "contextChatEngine_0-input-systemMessagePrompt-string" + } + ], + "inputAnchors": [ + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Vector Store Retriever", + "name": "vectorStoreRetriever", + "type": "VectorIndexRetriever", + "id": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "contextChatEngine_0-input-memory-BaseChatMemory" + } + ], + "inputs": { + "model": "{{chatOpenAI_LlamaIndex_2.data.instance}}", + "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}", + "memory": "{{RedisBackedChatMemory_0.data.instance}}", + "systemMessagePrompt": "" + }, + "outputAnchors": [ + { + "id": "contextChatEngine_0-output-contextChatEngine-ContextChatEngine", + "name": "contextChatEngine", + "label": "ContextChatEngine", + "type": "ContextChatEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1550.2553933740128, + "y": 270.7914631777829 + }, + "dragging": false + }, + { + "width": 300, + "height": 329, + "id": "RedisBackedChatMemory_0", + "position": { + "x": 1081.252815805786, + "y": 990.1701092562037 + }, + "type": "customNode", + "data": { + "id": "RedisBackedChatMemory_0", + "label": "Redis-Backed Chat Memory", + "version": 2, + "name": "RedisBackedChatMemory", + "type": "RedisBackedChatMemory", + "baseClasses": ["RedisBackedChatMemory", "BaseChatMemory", "BaseMemory"], + "category": "Memory", + "description": "Summarizes the conversation and stores the memory in Redis server", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "optional": true, + "credentialNames": ["redisCacheApi", "redisCacheUrlApi"], + "id": "RedisBackedChatMemory_0-input-credential-credential" + }, + { + "label": "Session Id", + "name": "sessionId", + "type": "string", + "description": "If not specified, a random id will be used. 
Learn more", + "default": "", + "additionalParams": true, + "optional": true, + "id": "RedisBackedChatMemory_0-input-sessionId-string" + }, + { + "label": "Session Timeouts", + "name": "sessionTTL", + "type": "number", + "description": "Omit this parameter to make sessions never expire", + "additionalParams": true, + "optional": true, + "id": "RedisBackedChatMemory_0-input-sessionTTL-number" + }, + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "additionalParams": true, + "id": "RedisBackedChatMemory_0-input-memoryKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "sessionId": "", + "sessionTTL": "", + "memoryKey": "chat_history" + }, + "outputAnchors": [ + { + "id": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory", + "name": "RedisBackedChatMemory", + "label": "RedisBackedChatMemory", + "type": "RedisBackedChatMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 1081.252815805786, + "y": 990.1701092562037 + } + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_2", + "position": { + "x": 1015.1605888108386, + "y": -38.31143117572401 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_2", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM with LlamaIndex implementation", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_2-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_2-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_2-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_2-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_2-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": 
"chatOpenAI_LlamaIndex_2-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1015.1605888108386, + "y": -38.31143117572401 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "recursiveCharacterTextSplitter_0", + "sourceHandle": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable", + "target": "textFile_0", + "targetHandle": "textFile_0-input-textSplitter-TextSplitter", + "type": "buttonedge", + "id": "recursiveCharacterTextSplitter_0-recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable-textFile_0-textFile_0-input-textSplitter-TextSplitter", + "data": { + "label": "" + } + }, + { + "source": "textFile_0", + "sourceHandle": "textFile_0-output-document-Document", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-document-Document", + "type": "buttonedge", + "id": "textFile_0-textFile_0-output-document-Document-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-document-Document", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_LlamaIndex_1", + "sourceHandle": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_1-chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "data": { + "label": "" + } + }, + { + "source": "pineconeLlamaIndex_0", + "sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "type": "buttonedge", + "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-contextChatEngine_0-contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "data": { + "label": "" + } + }, + { + "source": "RedisBackedChatMemory_0", + "sourceHandle": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-memory-BaseChatMemory", + "type": 
"buttonedge", + "id": "RedisBackedChatMemory_0-RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory-contextChatEngine_0-contextChatEngine_0-input-memory-BaseChatMemory", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_LlamaIndex_2", + "sourceHandle": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_2-chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-contextChatEngine_0-contextChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/marketplaces/chatflows/Long Term Memory.json b/packages/server/marketplaces/chatflows/Long Term Memory.json index c508b4807..2973594f3 100644 --- a/packages/server/marketplaces/chatflows/Long Term Memory.json +++ b/packages/server/marketplaces/chatflows/Long Term Memory.json @@ -205,7 +205,7 @@ "data": { "id": "ZepMemory_0", "label": "Zep Memory", - "version": 1, + "version": 2, "name": "ZepMemory", "type": "ZepMemory", "baseClasses": ["ZepMemory", "BaseChatMemory", "BaseMemory"], @@ -228,13 +228,6 @@ "default": "http://127.0.0.1:8000", "id": "ZepMemory_0-input-baseURL-string" }, - { - "label": "Auto Summary", - "name": "autoSummary", - "type": "boolean", - "default": true, - "id": "ZepMemory_0-input-autoSummary-boolean" - }, { "label": "Session Id", "name": "sessionId", @@ -252,15 +245,8 @@ "default": "10", "step": 1, "description": "Window of size k to surface the last k back-and-forths to use as memory.", - "id": "ZepMemory_0-input-k-number" - }, - { - "label": "Auto Summary Template", - "name": "autoSummaryTemplate", - "type": "string", - "default": "This is the summary of the following conversation:\n{summary}", "additionalParams": true, - "id": "ZepMemory_0-input-autoSummaryTemplate-string" + "id": "ZepMemory_0-input-k-number" }, { "label": "AI Prefix", @@ -306,10 +292,8 @@ "inputAnchors": [], "inputs": { "baseURL": "http://127.0.0.1:8000", - "autoSummary": true, "sessionId": "", "k": "10", - "autoSummaryTemplate": "This is the summary of the following conversation:\n{summary}", "aiPrefix": "ai", "humanPrefix": "human", "memoryKey": "chat_history", diff --git a/packages/server/marketplaces/chatflows/Query Engine.json b/packages/server/marketplaces/chatflows/Query Engine.json new file mode 100644 index 000000000..921ff1d66 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Query Engine.json @@ -0,0 +1,509 @@ +{ + "description": "Stateless query engine designed to answer question over your data using LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 382, + "id": "queryEngine_0", + "position": { + "x": 1407.9610494306783, + "y": 241.12144405808692 + }, + "type": "customNode", + "data": { + "id": "queryEngine_0", + "label": "Query Engine", + "version": 1, + "name": "queryEngine", + "type": "QueryEngine", + "baseClasses": ["QueryEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Simple query engine built to answer question over your data, without memory", + "inputParams": [ + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "queryEngine_0-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "Vector Store Retriever", + "name": 
"vectorStoreRetriever", + "type": "VectorIndexRetriever", + "id": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever" + }, + { + "label": "Response Synthesizer", + "name": "responseSynthesizer", + "type": "ResponseSynthesizer", + "description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See more", + "optional": true, + "id": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer" + } + ], + "inputs": { + "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}", + "responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "queryEngine_0-output-queryEngine-QueryEngine", + "name": "queryEngine", + "label": "QueryEngine", + "type": "QueryEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1407.9610494306783, + "y": 241.12144405808692 + }, + "dragging": false + }, + { + "width": 300, + "height": 585, + "id": "pineconeLlamaIndex_0", + "position": { + "x": 977.3886641397302, + "y": -261.2253031641797 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_0", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. 
Defaults to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": "", + "model": "{{chatAnthropic_LlamaIndex_0.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "", + "pineconeNamespace": "", + "pineconeMetadataFilter": "", + "topK": "" + }, + "outputAnchors": [ + { + "id": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "name": "pineconeLlamaIndex", + "label": "Pinecone", + "type": "Pinecone | VectorIndexRetriever" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 977.3886641397302, + "y": -261.2253031641797 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_0", + "position": { + "x": 529.8690713844503, + "y": -18.955726653613254 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_0", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding with LlamaIndex implementation", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 529.8690713844503, + "y": -18.955726653613254 + }, + "dragging": false + }, + { + "width": 300, + "height": 749, + "id": "compactrefineLlamaIndex_0", + "position": { + "x": 170.71031618977543, + "y": -33.83233752386292 + }, + "type": "customNode", + "data": { + "id": "compactrefineLlamaIndex_0", + "label": "Compact and Refine", + "version": 1, + "name": "compactrefineLlamaIndex", + "type": "CompactRefine", + "baseClasses": ["CompactRefine", "ResponseSynthesizer"], + "tags": ["LlamaIndex"], + "category": "Response Synthesizer", + "description": "CompactRefine is a slight variation of Refine that first compacts the text 
chunks into the smallest possible number of chunks.", + "inputParams": [ + { + "label": "Refine Prompt", + "name": "refinePrompt", + "type": "string", + "rows": 4, + "default": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:", + "warning": "Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-refinePrompt-string" + }, + { + "label": "Text QA Prompt", + "name": "textQAPrompt", + "type": "string", + "rows": 4, + "default": "Context information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:", + "warning": "Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-textQAPrompt-string" + } + ], + "inputAnchors": [], + "inputs": { + "refinePrompt": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:", + "textQAPrompt": "Context information:\n\n{context}\n\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}" + }, + "outputAnchors": [ + { + "id": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "name": "compactrefineLlamaIndex", + "label": "CompactRefine", + "type": "CompactRefine | ResponseSynthesizer" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 170.71031618977543, + "y": -33.83233752386292 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatAnthropic_LlamaIndex_0", + "position": { + "x": 521.3530883359147, + "y": -584.8241219614786 + }, + "type": "customNode", + "data": { + "id": "chatAnthropic_LlamaIndex_0", + "label": "ChatAnthropic", + "version": 1, + "name": "chatAnthropic_LlamaIndex", + "type": "ChatAnthropic", + "baseClasses": ["ChatAnthropic", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around ChatAnthropic LLM with LlamaIndex implementation", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["anthropicApi"], + "id": "chatAnthropic_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "claude-2", + "name": "claude-2", + "description": "Claude 2 latest major version, automatically receives updates to the model as they are released" + }, + { + "label": "claude-2.1", + "name": "claude-2.1", + "description": "Claude 2 latest full version" + }, + { + "label": "claude-instant-1", + "name": "claude-instant-1", + "description": "Claude Instant latest major version, automatically receives updates 
to the model as they are released" + }, + { + "label": "claude-v1", + "name": "claude-v1" + }, + { + "label": "claude-v1-100k", + "name": "claude-v1-100k" + }, + { + "label": "claude-v1.0", + "name": "claude-v1.0" + }, + { + "label": "claude-v1.2", + "name": "claude-v1.2" + }, + { + "label": "claude-v1.3", + "name": "claude-v1.3" + }, + { + "label": "claude-v1.3-100k", + "name": "claude-v1.3-100k" + }, + { + "label": "claude-instant-v1", + "name": "claude-instant-v1" + }, + { + "label": "claude-instant-v1-100k", + "name": "claude-instant-v1-100k" + }, + { + "label": "claude-instant-v1.0", + "name": "claude-instant-v1.0" + }, + { + "label": "claude-instant-v1.1", + "name": "claude-instant-v1.1" + }, + { + "label": "claude-instant-v1.1-100k", + "name": "claude-instant-v1.1-100k" + } + ], + "default": "claude-2", + "optional": true, + "id": "chatAnthropic_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatAnthropic_LlamaIndex_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokensToSample", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_LlamaIndex_0-input-maxTokensToSample-number" + }, + { + "label": "Top P", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_LlamaIndex_0-input-topP-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "claude-2", + "temperature": 0.9, + "maxTokensToSample": "", + "topP": "" + }, + "outputAnchors": [ + { + "id": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex", + "name": "chatAnthropic_LlamaIndex", + "label": "ChatAnthropic", + "type": "ChatAnthropic | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 521.3530883359147, + "y": -584.8241219614786 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "pineconeLlamaIndex_0", + "sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "target": "queryEngine_0", + "targetHandle": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "type": "buttonedge", + "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-queryEngine_0-queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "data": { + "label": "" + } + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "data": { + "label": "" + } + }, + { + "source": "compactrefineLlamaIndex_0", + "sourceHandle": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "target": "queryEngine_0", + "targetHandle": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer", + "type": "buttonedge", + "id": 
"compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-queryEngine_0-queryEngine_0-input-responseSynthesizer-ResponseSynthesizer", + "data": { + "label": "" + } + }, + { + "source": "chatAnthropic_LlamaIndex_0", + "sourceHandle": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatAnthropic_LlamaIndex_0-chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/marketplaces/chatflows/Simple Chat Engine.json b/packages/server/marketplaces/chatflows/Simple Chat Engine.json new file mode 100644 index 000000000..b3854a519 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Simple Chat Engine.json @@ -0,0 +1,270 @@ +{ + "description": "Simple chat engine to handle back and forth conversations using LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 462, + "id": "simpleChatEngine_0", + "position": { + "x": 1210.127368000538, + "y": 324.98110560103896 + }, + "type": "customNode", + "data": { + "id": "simpleChatEngine_0", + "label": "Simple Chat Engine", + "version": 1, + "name": "simpleChatEngine", + "type": "SimpleChatEngine", + "baseClasses": ["SimpleChatEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Simple engine to handle back and forth conversations", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "placeholder": "You are a helpful assistant", + "id": "simpleChatEngine_0-input-systemMessagePrompt-string" + } + ], + "inputAnchors": [ + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "simpleChatEngine_0-input-memory-BaseChatMemory" + } + ], + "inputs": { + "model": "{{azureChatOpenAI_LlamaIndex_0.data.instance}}", + "memory": "{{bufferMemory_0.data.instance}}", + "systemMessagePrompt": "You are a helpful assistant." 
+ }, + "outputAnchors": [ + { + "id": "simpleChatEngine_0-output-simpleChatEngine-SimpleChatEngine", + "name": "simpleChatEngine", + "label": "SimpleChatEngine", + "type": "SimpleChatEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 1210.127368000538, + "y": 324.98110560103896 + } + }, + { + "width": 300, + "height": 376, + "id": "bufferMemory_0", + "position": { + "x": 393.9823478014782, + "y": 415.7414943210391 + }, + "type": "customNode", + "data": { + "id": "bufferMemory_0", + "label": "Buffer Memory", + "version": 1, + "name": "bufferMemory", + "type": "BufferMemory", + "baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"], + "category": "Memory", + "description": "Remembers previous conversational back and forths directly", + "inputParams": [ + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "id": "bufferMemory_0-input-memoryKey-string" + }, + { + "label": "Input Key", + "name": "inputKey", + "type": "string", + "default": "input", + "id": "bufferMemory_0-input-inputKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "memoryKey": "chat_history", + "inputKey": "input" + }, + "outputAnchors": [ + { + "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "name": "bufferMemory", + "label": "BufferMemory", + "type": "BufferMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 393.9823478014782, + "y": 415.7414943210391 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "azureChatOpenAI_LlamaIndex_0", + "position": { + "x": 746.5530862509605, + "y": -54.107978373323306 + }, + "type": "customNode", + "data": { + "id": "azureChatOpenAI_LlamaIndex_0", + "label": "AzureChatOpenAI", + "version": 1, + "name": "azureChatOpenAI_LlamaIndex", + "type": "AzureChatOpenAI", + "baseClasses": ["AzureChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around Azure OpenAI Chat LLM with LlamaIndex implementation", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["azureOpenAIApi"], + "id": "azureChatOpenAI_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + } + ], + "default": "gpt-3.5-turbo-16k", + "optional": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, 
+ "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex", + "name": "azureChatOpenAI_LlamaIndex", + "label": "AzureChatOpenAI", + "type": "AzureChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 746.5530862509605, + "y": -54.107978373323306 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "bufferMemory_0", + "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "target": "simpleChatEngine_0", + "targetHandle": "simpleChatEngine_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-simpleChatEngine_0-simpleChatEngine_0-input-memory-BaseChatMemory", + "data": { + "label": "" + } + }, + { + "source": "azureChatOpenAI_LlamaIndex_0", + "sourceHandle": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex", + "target": "simpleChatEngine_0", + "targetHandle": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "azureChatOpenAI_LlamaIndex_0-azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex-simpleChatEngine_0-simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json index 9b1119b90..be077f979 100644 --- a/packages/server/marketplaces/chatflows/WebPage QnA.json +++ b/packages/server/marketplaces/chatflows/WebPage QnA.json @@ -589,7 +589,7 @@ "label": "Session Id", "name": "sessionId", "type": "string", - "description": "If not specified, the first CHAT_MESSAGE_ID will be used as sessionId", + "description": "If not specified, a random id will be used. 
Learn more", "default": "", "additionalParams": true, "optional": true, diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index d87d2c0ac..78cc42606 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -37,13 +37,12 @@ import { databaseEntities, transformToCredentialEntity, decryptCredentialData, - clearAllSessionMemory, replaceInputsWithConfig, getEncryptionKey, - checkMemorySessionId, - clearSessionMemoryFromViewMessageDialog, + replaceMemorySessionId, getUserHome, - replaceChatHistory + replaceChatHistory, + clearSessionMemory } from './utils' import { cloneDeep, omit, uniqWith, isEqual } from 'lodash' import { getDataSource } from './DataSource' @@ -387,7 +386,12 @@ export class App { const endingNodeData = nodes.find((nd) => nd.id === endingNodeId)?.data if (!endingNodeData) return res.status(500).send(`Ending node ${endingNodeId} data not found`) - if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents') { + if ( + endingNodeData && + endingNodeData.category !== 'Chains' && + endingNodeData.category !== 'Agents' && + endingNodeData.category !== 'Engine' + ) { return res.status(500).send(`Ending node must be either a Chain or Agent`) } @@ -472,18 +476,15 @@ export class App { const parsedFlowData: IReactFlowObject = JSON.parse(flowData) const nodes = parsedFlowData.nodes - if (isClearFromViewMessageDialog) { - await clearSessionMemoryFromViewMessageDialog( - nodes, - this.nodesPool.componentNodes, - chatId, - this.AppDataSource, - sessionId, - memoryType - ) - } else { - await clearAllSessionMemory(nodes, this.nodesPool.componentNodes, chatId, this.AppDataSource, sessionId) - } + await clearSessionMemory( + nodes, + this.nodesPool.componentNodes, + chatId, + this.AppDataSource, + sessionId, + memoryType, + isClearFromViewMessageDialog + ) const deleteOptions: FindOptionsWhere = { chatflowid, chatId } if (memoryType) deleteOptions.memoryType = memoryType @@ -1377,7 +1378,13 @@ export class App { const endingNodeData = nodes.find((nd) => nd.id === endingNodeId)?.data if (!endingNodeData) return res.status(500).send(`Ending node ${endingNodeId} data not found`) - if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents' && !isUpsert) { + if ( + endingNodeData && + endingNodeData.category !== 'Chains' && + endingNodeData.category !== 'Agents' && + endingNodeData.category !== 'Engine' && + !isUpsert + ) { return res.status(500).send(`Ending node must be either a Chain or Agent`) } @@ -1396,7 +1403,9 @@ export class App { isStreamValid = isFlowValidForStream(nodes, endingNodeData) - let chatHistory: IMessage[] | string = incomingInput.history + let chatHistory: IMessage[] = incomingInput.history + + // If chatHistory is empty, and sessionId/chatId is present, replace it if ( endingNodeData.inputs?.memory && !incomingInput.history && @@ -1437,8 +1446,10 @@ export class App { const nodeToExecute = reactFlowNodes.find((node: IReactFlowNode) => node.id === endingNodeId) if (!nodeToExecute) return res.status(404).send(`Node ${endingNodeId} not found`) - if (incomingInput.overrideConfig) + if (incomingInput.overrideConfig) { nodeToExecute.data = replaceInputsWithConfig(nodeToExecute.data, incomingInput.overrideConfig) + } + const reactFlowNodeData: INodeData = resolveVariables( nodeToExecute.data, reactFlowNodes, @@ -1458,19 +1469,11 @@ export class App { logger.debug(`[server]: Running ${nodeToExecuteData.label} (${nodeToExecuteData.id})`) let sessionId = 
undefined - if (nodeToExecuteData.instance) sessionId = checkMemorySessionId(nodeToExecuteData.instance, chatId) - - const memoryNode = this.findMemoryLabel(nodes, edges) - const memoryType = memoryNode?.data.label - - let chatHistory: IMessage[] | string = incomingInput.history - if (memoryNode && !incomingInput.history && (incomingInput.chatId || incomingInput.overrideConfig?.sessionId)) { - chatHistory = await replaceChatHistory(memoryNode, incomingInput, this.AppDataSource, databaseEntities, logger) - } + if (nodeToExecuteData.instance) sessionId = replaceMemorySessionId(nodeToExecuteData.instance, chatId) let result = isStreamValid ? await nodeInstance.run(nodeToExecuteData, incomingInput.question, { - chatHistory, + chatHistory: incomingInput.history, socketIO, socketIOClientId: incomingInput.socketIOClientId, logger, @@ -1480,7 +1483,7 @@ export class App { chatId }) : await nodeInstance.run(nodeToExecuteData, incomingInput.question, { - chatHistory, + chatHistory: incomingInput.history, logger, appDataSource: this.AppDataSource, databaseEntities, @@ -1495,6 +1498,9 @@ export class App { sessionId = result.assistant.threadId } + const memoryNode = this.findMemoryLabel(nodes, edges) + const memoryType = memoryNode?.data.label + const userMessage: Omit = { role: 'userMessage', content: incomingInput.question, diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index 0b1e62d25..0b37b6081 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -217,7 +217,7 @@ export const buildLangchain = async ( depthQueue: IDepthQueue, componentNodes: IComponentNodes, question: string, - chatHistory: IMessage[] | string, + chatHistory: IMessage[], chatId: string, chatflowid: string, appDataSource: DataSource, @@ -324,22 +324,30 @@ export const buildLangchain = async ( } /** - * Clear all session memories on the canvas + * Clear session memories * @param {IReactFlowNode[]} reactFlowNodes * @param {IComponentNodes} componentNodes * @param {string} chatId * @param {DataSource} appDataSource * @param {string} sessionId + * @param {string} memoryType + * @param {string} isClearFromViewMessageDialog */ -export const clearAllSessionMemory = async ( +export const clearSessionMemory = async ( reactFlowNodes: IReactFlowNode[], componentNodes: IComponentNodes, chatId: string, appDataSource: DataSource, - sessionId?: string + sessionId?: string, + memoryType?: string, + isClearFromViewMessageDialog?: string ) => { for (const node of reactFlowNodes) { if (node.data.category !== 'Memory' && node.data.type !== 'OpenAIAssistant') continue + + // Only clear specific session memory from View Message Dialog UI + if (isClearFromViewMessageDialog && memoryType && node.data.label !== memoryType) continue + const nodeInstanceFilePath = componentNodes[node.data.name].filePath as string const nodeModule = await import(nodeInstanceFilePath) const newNodeInstance = new nodeModule.nodeClass() @@ -348,42 +356,10 @@ export const clearAllSessionMemory = async ( node.data.inputs.sessionId = sessionId } - if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.clearSessionMemory) { - await newNodeInstance.memoryMethods.clearSessionMemory(node.data, { chatId, appDataSource, databaseEntities, logger }) - } - } -} + const initializedInstance = await newNodeInstance.init(node.data, '', { chatId, appDataSource, databaseEntities, logger }) -/** - * Clear specific session memory from View Message Dialog UI - * @param {IReactFlowNode[]} reactFlowNodes - * @param 
{IComponentNodes} componentNodes - * @param {string} chatId - * @param {DataSource} appDataSource - * @param {string} sessionId - * @param {string} memoryType - */ -export const clearSessionMemoryFromViewMessageDialog = async ( - reactFlowNodes: IReactFlowNode[], - componentNodes: IComponentNodes, - chatId: string, - appDataSource: DataSource, - sessionId?: string, - memoryType?: string -) => { - if (!sessionId) return - for (const node of reactFlowNodes) { - if (node.data.category !== 'Memory' && node.data.type !== 'OpenAIAssistant') continue - if (memoryType && node.data.label !== memoryType) continue - const nodeInstanceFilePath = componentNodes[node.data.name].filePath as string - const nodeModule = await import(nodeInstanceFilePath) - const newNodeInstance = new nodeModule.nodeClass() - - if (sessionId && node.data.inputs) node.data.inputs.sessionId = sessionId - - if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.clearSessionMemory) { - await newNodeInstance.memoryMethods.clearSessionMemory(node.data, { chatId, appDataSource, databaseEntities, logger }) - return + if (initializedInstance.clearChatMessages) { + await initializedInstance.clearChatMessages() } } } @@ -400,7 +376,7 @@ export const getVariableValue = ( paramValue: string, reactFlowNodes: IReactFlowNode[], question: string, - chatHistory: IMessage[] | string, + chatHistory: IMessage[], isAcceptVariable = false ) => { let returnVal = paramValue @@ -433,10 +409,7 @@ export const getVariableValue = ( } if (isAcceptVariable && variableFullPath === CHAT_HISTORY_VAR_PREFIX) { - variableDict[`{{${variableFullPath}}}`] = handleEscapeCharacters( - typeof chatHistory === 'string' ? chatHistory : convertChatHistoryToText(chatHistory), - false - ) + variableDict[`{{${variableFullPath}}}`] = handleEscapeCharacters(convertChatHistoryToText(chatHistory), false) } // Split by first occurrence of '.' 
to get just nodeId @@ -479,7 +452,7 @@ export const resolveVariables = ( reactFlowNodeData: INodeData, reactFlowNodes: IReactFlowNode[], question: string, - chatHistory: IMessage[] | string + chatHistory: IMessage[] ): INodeData => { let flowNodeData = cloneDeep(reactFlowNodeData) const types = 'inputs' @@ -558,7 +531,7 @@ export const isStartNodeDependOnInput = (startingNodes: IReactFlowNode[], nodes: if (inputVariables.length > 0) return true } } - const whitelistNodeNames = ['vectorStoreToDocument', 'autoGPT'] + const whitelistNodeNames = ['vectorStoreToDocument', 'autoGPT', 'chatPromptTemplate', 'promptTemplate'] //If these nodes are found, chatflow cannot be reused for (const node of nodes) { if (whitelistNodeNames.includes(node.data.name)) return true } @@ -706,7 +679,15 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component */ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => { const streamAvailableLLMs = { - 'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'], + 'Chat Models': [ + 'azureChatOpenAI', + 'chatOpenAI', + 'chatOpenAI_LlamaIndex', + 'chatAnthropic', + 'chatAnthropic_LlamaIndex', + 'chatOllama', + 'awsChatBedrock' + ], LLMs: ['azureOpenAI', 'openAI', 'ollama'] } @@ -729,6 +710,9 @@ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNod // Agent that are available to stream const whitelistAgents = ['openAIFunctionAgent', 'csvAgent', 'airtableAgent', 'conversationalRetrievalAgent'] isValidChainOrAgent = whitelistAgents.includes(endingNodeData.name) + } else if (endingNodeData.category === 'Engine') { + const whitelistEngine = ['contextChatEngine', 'simpleChatEngine'] + isValidChainOrAgent = whitelistEngine.includes(endingNodeData.name) } // If no output parser, flow is available to stream @@ -866,7 +850,7 @@ export const redactCredentialWithPasswordType = ( * @param {any} instance * @param {string} chatId */ -export const checkMemorySessionId = (instance: any, chatId: string): string | undefined => { +export const replaceMemorySessionId = (instance: any, chatId: string): string | undefined => { if (instance.memory && instance.memory.isSessionIdUsingChatMessageId && chatId) { instance.memory.sessionId = chatId instance.memory.chatHistory.sessionId = chatId @@ -893,7 +877,7 @@ export const replaceChatHistory = async ( appDataSource: DataSource, databaseEntities: IDatabaseEntity, logger: any -): Promise<string> => { +): Promise<IMessage[]> => { const nodeInstanceFilePath = memoryNode.data.filePath as string const nodeModule = await import(nodeInstanceFilePath) const newNodeInstance = new nodeModule.nodeClass() @@ -902,14 +886,12 @@ export const replaceChatHistory = async ( memoryNode.data.inputs.sessionId = incomingInput.overrideConfig.sessionId } - if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.getChatMessages) { - return await newNodeInstance.memoryMethods.getChatMessages(memoryNode.data, { - chatId: incomingInput.chatId, - appDataSource, - databaseEntities, - logger - }) - } + const initializedInstance = await newNodeInstance.init(memoryNode.data, '', { + chatId: incomingInput.chatId, + appDataSource, + databaseEntities, + logger + }) - return '' + return await initializedInstance.getChatMessages() }
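A note on the refactor just above: the old `memoryMethods` indirection is gone, and `clearSessionMemory` / `replaceChatHistory` now call `init()` on the memory node and invoke `clearChatMessages()` / `getChatMessages()` directly on the initialized instance. The following is a minimal sketch (not part of this patch) of the contract this implies for a memory node; `IMessage` mirrors the `{ message, type }` turns used for `chatHistory`, while the other type names are illustrative assumptions rather than the package's actual interfaces.

```typescript
// Sketch only: the shape clearSessionMemory/replaceChatHistory rely on after this patch.
interface IMessage {
    message: string
    type: 'userMessage' | 'apiMessage'
}

// Hypothetical name for the object init() must now resolve to.
interface InitializedMemory {
    getChatMessages(): Promise<IMessage[]>
    clearChatMessages(): Promise<void>
}

// Illustrative in-memory backing store standing in for Redis, DynamoDB, Zep, etc.
const store = new Map<string, IMessage[]>()

class ExampleMemory_Memory {
    // Mirrors the call sites above: init(node.data, '', { chatId, appDataSource, ... })
    async init(nodeData: any, _input: string, options: { chatId: string }): Promise<InitializedMemory> {
        const sessionId: string = nodeData.inputs?.sessionId || options.chatId
        return {
            async getChatMessages() {
                // Read the turns recorded for this session
                return store.get(sessionId) ?? []
            },
            async clearChatMessages() {
                // Drop the turns recorded for this session
                store.delete(sessionId)
            }
        }
    }
}
```

Because `clearSessionMemory` guards with `if (initializedInstance.clearChatMessages)`, nodes that have not migrated yet simply skip the clear step rather than throwing. On the calling side, these paths are exercised through the prediction endpoint handled in `packages/server/src/index.ts` above; as a rough sketch of a client call (the base URL and chatflow id are hypothetical placeholders, and only the `question`, `history`, and `overrideConfig` fields are taken from the handler changes in this patch):

```typescript
// Hedged sketch, assuming a local Flowise server and Node 18+ (global fetch).
const FLOWISE_URL = 'http://localhost:3000'
const CHATFLOW_ID = '<chatflow-id>' // hypothetical id of an imported LlamaIndex flow

async function ask(question: string, sessionId?: string): Promise<unknown> {
    const res = await fetch(`${FLOWISE_URL}/api/v1/prediction/${CHATFLOW_ID}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            question,
            // For stateful flows (the chat engines), scope memory to a session;
            // the stateless Query Engine flow simply ignores this.
            overrideConfig: sessionId ? { sessionId } : undefined
        })
    })
    return res.json()
}

ask('What does the indexed document say about pricing?', 'demo-session').then(console.log)
```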
diff --git a/packages/ui/src/assets/images/llamaindex.png b/packages/ui/src/assets/images/llamaindex.png new file mode 100644 index 0000000000000000000000000000000000000000..139c33eb027b7ea654e151a4c32ef6a5506259af GIT binary patch literal 28343 [~28 KB of base85-encoded PNG data omitted]
znk%`RFSrX@ai8}tQsf^*Qgg;LTfAR%es#b7omXU~aH(G&DrWrsCYJD^Qb=As{b}>> z?iAw1_ETk*gaYNY}K&8@t zr4&)#6~q~M$n=CG?K}I|fMJsSnHi1AiQ%DFJ-nf)Ry}2!B|L!A_U1`t8OEK*dkM_=-+@1X?CY5;l4d=CqjnI5^09w!qGxd!`m zQ>+Ts&escN=RV(#KfyS<9vVv1Y`)ha>6s6tOe5TiIv!B?F)v`}{lxJP9JODYU6j`@ z0iR)yhqX_d`m(nr?_`b$64Fq?z2}{b$ps>!3F6Jznj?MvLI~K1$K!g$Ph^vSd5Zu( zG)T;TCaNMr5H_7GY2!5g(M>u%hkyh!?qxfqF9}L^K$6>SbqeVGm{bU8k??h&i{y7o zp4#$;l`Jvw7qzfk%w>!20CSca-`X6MZ?X7|afiQ zwCv+WK3-|)48iE^#r}A$0{V-@GDowWh=!2|G1SPj+=G!;yafwR(f{V2$(^a_{Wm?V z^5P3(2!7Wm1p(r+%{hVR#M&zsepM?KA*9`yvJ(vww^+`e4eCrHnCGrS0Y!2;}nx1Nv IvTf-90OlTzSO5S3 literal 0 HcmV?d00001 diff --git a/packages/ui/src/ui-component/dialog/NodeInfoDialog.js b/packages/ui/src/ui-component/dialog/NodeInfoDialog.js index 6f3bec5de..a5dbd4113 100644 --- a/packages/ui/src/ui-component/dialog/NodeInfoDialog.js +++ b/packages/ui/src/ui-component/dialog/NodeInfoDialog.js @@ -132,6 +132,35 @@ const NodeInfoDialog = ({ show, dialogProps, onCancel }) => { )} + {dialogProps.data.tags && + dialogProps.data.tags.length && + dialogProps.data.tags.map((tag, index) => ( +
+                        <div key={index} style={{ display: 'inline-block', marginRight: 5 }}>
+                            {tag.toLowerCase()}
+                        </div>
+ ))} diff --git a/packages/ui/src/utils/genericHelper.js b/packages/ui/src/utils/genericHelper.js index b34d6c720..69491fbc2 100644 --- a/packages/ui/src/utils/genericHelper.js +++ b/packages/ui/src/utils/genericHelper.js @@ -286,6 +286,7 @@ export const generateExportFlowData = (flowData) => { name: node.data.name, type: node.data.type, baseClasses: node.data.baseClasses, + tags: node.data.tags, category: node.data.category, description: node.data.description, inputParams: node.data.inputParams, diff --git a/packages/ui/src/views/canvas/AddNodes.js b/packages/ui/src/views/canvas/AddNodes.js index 7bf3e7ff0..ea813df53 100644 --- a/packages/ui/src/views/canvas/AddNodes.js +++ b/packages/ui/src/views/canvas/AddNodes.js @@ -22,7 +22,9 @@ import { Popper, Stack, Typography, - Chip + Chip, + Tab, + Tabs } from '@mui/material' import ExpandMoreIcon from '@mui/icons-material/ExpandMore' @@ -36,12 +38,20 @@ import { StyledFab } from 'ui-component/button/StyledFab' // icons import { IconPlus, IconSearch, IconMinus, IconX } from '@tabler/icons' +import LlamaindexPNG from 'assets/images/llamaindex.png' +import LangChainPNG from 'assets/images/langchain.png' // const import { baseURL } from 'store/constant' import { SET_COMPONENT_NODES } from 'store/actions' // ==============================|| ADD NODES||============================== // +function a11yProps(index) { + return { + id: `attachment-tab-${index}`, + 'aria-controls': `attachment-tabpanel-${index}` + } +} const AddNodes = ({ nodesData, node }) => { const theme = useTheme() @@ -52,6 +62,7 @@ const AddNodes = ({ nodesData, node }) => { const [nodes, setNodes] = useState({}) const [open, setOpen] = useState(false) const [categoryExpanded, setCategoryExpanded] = useState({}) + const [tabValue, setTabValue] = useState(0) const anchorRef = useRef(null) const prevOpen = useRef(open) @@ -86,6 +97,11 @@ const AddNodes = ({ nodesData, node }) => { } } + const handleTabChange = (event, newValue) => { + setTabValue(newValue) + filterSearch(searchValue, newValue) + } + const getSearchedNodes = (value) => { const passed = nodesData.filter((nd) => { const passesQuery = nd.name.toLowerCase().includes(value.toLowerCase()) @@ -95,23 +111,34 @@ const AddNodes = ({ nodesData, node }) => { return passed } - const filterSearch = (value) => { + const filterSearch = (value, newTabValue) => { setSearchValue(value) setTimeout(() => { if (value) { const returnData = getSearchedNodes(value) - groupByCategory(returnData, true) + groupByCategory(returnData, newTabValue ?? tabValue, true) scrollTop() } else if (value === '') { - groupByCategory(nodesData) + groupByCategory(nodesData, newTabValue ?? tabValue) scrollTop() } }, 500) } - const groupByCategory = (nodes, isFilter) => { + const groupByTags = (nodes, newTabValue = 0) => { + const langchainNodes = nodes.filter((nd) => !nd.tags) + const llmaindexNodes = nodes.filter((nd) => nd.tags && nd.tags.includes('LlamaIndex')) + if (newTabValue === 0) { + return langchainNodes + } else { + return llmaindexNodes + } + } + + const groupByCategory = (nodes, newTabValue, isFilter) => { + const taggedNodes = groupByTags(nodes, newTabValue) const accordianCategories = {} - const result = nodes.reduce(function (r, a) { + const result = taggedNodes.reduce(function (r, a) { r[a.category] = r[a.category] || [] r[a.category].push(a) accordianCategories[a.category] = isFilter ? 
true : false
@@ -244,13 +271,61 @@ const AddNodes = ({ nodesData, node }) => {
                                'aria-label': 'weight'
                            }}
                        />
+                    <Tabs value={tabValue} onChange={handleTabChange}>
+                        {['LangChain', 'LlamaIndex'].map((item, index) => (
+                            <Tab
+                                icon={
+                                    <img style={{ width: 20, height: 20 }} src={item === 'LlamaIndex' ? LlamaindexPNG : LangChainPNG} alt={item} />
+                                }
+                                iconPosition='start'
+                                key={index}
+                                label={
+                                    item === 'LlamaIndex' ? (
+                                        <>
+                                            <span>
+                                                {item}
+                                            </span>
+                                            &nbsp;
+                                            <Chip size='small' label='NEW' />
+                                        </>
+                                    ) : (
+                                        <span>
+                                            {item}
+                                        </span>
+                                    )
+                                }
+                                {...a11yProps(index)}
+                            ></Tab>
+                        ))}
+                    </Tabs>
                    <PerfectScrollbar
                        containerRef={(el) => {
                            ps.current = el
                        }}
-                       style={{ height: '100%', maxHeight: 'calc(100vh - 320px)', overflowX: 'hidden' }}
+                       style={{ height: '100%', maxHeight: 'calc(100vh - 380px)', overflowX: 'hidden' }}
                    >
diff --git a/packages/ui/src/views/canvas/CanvasNode.js b/packages/ui/src/views/canvas/CanvasNode.js
--- a/packages/ui/src/views/canvas/CanvasNode.js
+++ b/packages/ui/src/views/canvas/CanvasNode.js
+import LlamaindexPNG from 'assets/images/llamaindex.png'
 const CardWrapper = styled(MainCard)(({ theme }) => ({
     background: theme.palette.card.main,
@@ -179,9 +180,25 @@ const CanvasNode = ({ data }) => {
                            {data.label}
+                    {data.tags && data.tags.includes('LlamaIndex') && (
+                        <>
+                            <div style={{ display: 'flex', flexDirection: 'row', alignItems: 'center' }}>
+                                <img style={{ width: 20, height: 20, objectFit: 'contain' }} src={LlamaindexPNG} alt='LlamaIndex' />
+                                LlamaIndex
+                            </div>
+                        </>
+                    )}
                    {warningMessage && (
                        <>
-                       <Tooltip title={<span>{warningMessage}</span>} placement='top'>
diff --git a/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js b/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js
index 8ec5ada30..44cb75e8d 100644
--- a/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js
+++ b/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js
@@ -13,6 +13,7 @@ import AdditionalParamsDialog from 'ui-component/dialog/AdditionalParamsDialog'

 // const
 import { baseURL } from 'store/constant'
+import LlamaindexPNG from 'assets/images/llamaindex.png'

 const CardWrapper = styled(MainCard)(({ theme }) => ({
     background: theme.palette.card.main,
@@ -87,6 +88,23 @@ const MarketplaceCanvasNode = ({ data }) => {
                            {data.label}
+                    {data.tags && data.tags.includes('LlamaIndex') && (
+                        <>
+                            <div style={{ display: 'flex', flexDirection: 'row', alignItems: 'center' }}>
+                                <img style={{ width: 20, height: 20, objectFit: 'contain' }} src={LlamaindexPNG} alt='LlamaIndex' />
+                                LlamaIndex
+ + )} {(data.inputAnchors.length > 0 || data.inputParams.length > 0) && ( <> From 2279ffd57dea7e29df3dda8364262708c915980d Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 24 Jan 2024 14:13:44 +0000 Subject: [PATCH 02/24] update conversational retrieval agent --- .../ConversationalRetrievalAgent.ts | 3 --- 1 file changed, 3 deletions(-) diff --git a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts index b238456ac..4cd13d130 100644 --- a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts +++ b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts @@ -70,9 +70,6 @@ class ConversationalRetrievalAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) - - executor.memory = memory - const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) From 1fb3e25f5327ff16d791b4aca11278fbebf5a012 Mon Sep 17 00:00:00 2001 From: Henry Date: Thu, 25 Jan 2024 21:57:56 +0000 Subject: [PATCH 03/24] update linting issues --- .../ConversationalRetrievalAgent.ts | 1 + .../chatmodels/ChatAnthropic/ChatAnthropic.ts | 63 ++++++++++++++++++- .../nodes/chatmodels/ChatAnthropic/utils.ts | 61 ------------------ .../chatflows/Context Chat Engine.json | 10 ++- 4 files changed, 71 insertions(+), 64 deletions(-) delete mode 100644 packages/components/nodes/chatmodels/ChatAnthropic/utils.ts diff --git a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts index 4cd13d130..406a156ff 100644 --- a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts +++ b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts @@ -70,6 +70,7 @@ class ConversationalRetrievalAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) + const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts index 794058bd6..599578f5a 100644 --- a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts @@ -3,7 +3,6 @@ import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../ import { AnthropicInput, ChatAnthropic } from 'langchain/chat_models/anthropic' import { BaseCache } from 'langchain/schema' import { BaseLLMParams } from 'langchain/llms/base' -import { availableModels } from './utils' class ChatAnthropic_ChatModels implements INode { label: string @@ -43,7 +42,67 @@ class ChatAnthropic_ChatModels implements INode { label: 'Model Name', name: 'modelName', type: 'options', - 
options: [...availableModels], + options: [ + { + label: 'claude-2', + name: 'claude-2', + description: 'Claude 2 latest major version, automatically get updates to the model as they are released' + }, + { + label: 'claude-2.1', + name: 'claude-2.1', + description: 'Claude 2 latest full version' + }, + { + label: 'claude-instant-1', + name: 'claude-instant-1', + description: 'Claude Instant latest major version, automatically get updates to the model as they are released' + }, + { + label: 'claude-v1', + name: 'claude-v1' + }, + { + label: 'claude-v1-100k', + name: 'claude-v1-100k' + }, + { + label: 'claude-v1.0', + name: 'claude-v1.0' + }, + { + label: 'claude-v1.2', + name: 'claude-v1.2' + }, + { + label: 'claude-v1.3', + name: 'claude-v1.3' + }, + { + label: 'claude-v1.3-100k', + name: 'claude-v1.3-100k' + }, + { + label: 'claude-instant-v1', + name: 'claude-instant-v1' + }, + { + label: 'claude-instant-v1-100k', + name: 'claude-instant-v1-100k' + }, + { + label: 'claude-instant-v1.0', + name: 'claude-instant-v1.0' + }, + { + label: 'claude-instant-v1.1', + name: 'claude-instant-v1.1' + }, + { + label: 'claude-instant-v1.1-100k', + name: 'claude-instant-v1.1-100k' + } + ], default: 'claude-2', optional: true }, diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts b/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts deleted file mode 100644 index 209996a69..000000000 --- a/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts +++ /dev/null @@ -1,61 +0,0 @@ -export const availableModels = [ - { - label: 'claude-2', - name: 'claude-2', - description: 'Claude 2 latest major version, automatically get updates to the model as they are released' - }, - { - label: 'claude-2.1', - name: 'claude-2.1', - description: 'Claude 2 latest full version' - }, - { - label: 'claude-instant-1', - name: 'claude-instant-1', - description: 'Claude Instant latest major version, automatically get updates to the model as they are released' - }, - { - label: 'claude-v1', - name: 'claude-v1' - }, - { - label: 'claude-v1-100k', - name: 'claude-v1-100k' - }, - { - label: 'claude-v1.0', - name: 'claude-v1.0' - }, - { - label: 'claude-v1.2', - name: 'claude-v1.2' - }, - { - label: 'claude-v1.3', - name: 'claude-v1.3' - }, - { - label: 'claude-v1.3-100k', - name: 'claude-v1.3-100k' - }, - { - label: 'claude-instant-v1', - name: 'claude-instant-v1' - }, - { - label: 'claude-instant-v1-100k', - name: 'claude-instant-v1-100k' - }, - { - label: 'claude-instant-v1.0', - name: 'claude-instant-v1.0' - }, - { - label: 'claude-instant-v1.1', - name: 'claude-instant-v1.1' - }, - { - label: 'claude-instant-v1.1-100k', - name: 'claude-instant-v1.1-100k' - } -] diff --git a/packages/server/marketplaces/chatflows/Context Chat Engine.json b/packages/server/marketplaces/chatflows/Context Chat Engine.json index e4fdd4f52..7608a550c 100644 --- a/packages/server/marketplaces/chatflows/Context Chat Engine.json +++ b/packages/server/marketplaces/chatflows/Context Chat Engine.json @@ -495,6 +495,13 @@ "category": "Engine", "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation", "inputParams": [ + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "contextChatEngine_0-input-returnSourceDocuments-boolean" + }, { "label": "System Message", "name": "systemMessagePrompt", @@ -529,7 +536,8 @@ "model": "{{chatOpenAI_LlamaIndex_2.data.instance}}", "vectorStoreRetriever": 
"{{pineconeLlamaIndex_0.data.instance}}", "memory": "{{RedisBackedChatMemory_0.data.instance}}", - "systemMessagePrompt": "" + "systemMessagePrompt": "", + "returnSourceDocuments": true }, "outputAnchors": [ { From 3d670fec817e8d7b0d4b6de5ee262ffc5b64efa8 Mon Sep 17 00:00:00 2001 From: Henry Date: Fri, 26 Jan 2024 00:22:24 +0000 Subject: [PATCH 04/24] update query engine --- .../nodes/engine/QueryEngine/QueryEngine.ts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/components/nodes/engine/QueryEngine/QueryEngine.ts b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts index 14774703b..bd6e040dc 100644 --- a/packages/components/nodes/engine/QueryEngine/QueryEngine.ts +++ b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts @@ -68,13 +68,15 @@ class QueryEngine_LlamaIndex implements INode { const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer + let queryEngine = new RetrieverQueryEngine(vectorStoreRetriever) + if (responseSynthesizerObj) { if (responseSynthesizerObj.type === 'TreeSummarize') { const responseSynthesizer = new ResponseSynthesizer({ responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext, responseSynthesizerObj.textQAPromptTemplate), serviceContext: vectorStoreRetriever.serviceContext }) - return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) } else if (responseSynthesizerObj.type === 'CompactAndRefine') { const responseSynthesizer = new ResponseSynthesizer({ responseBuilder: new CompactAndRefine( @@ -84,7 +86,7 @@ class QueryEngine_LlamaIndex implements INode { ), serviceContext: vectorStoreRetriever.serviceContext }) - return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) } else if (responseSynthesizerObj.type === 'Refine') { const responseSynthesizer = new ResponseSynthesizer({ responseBuilder: new Refine( @@ -94,18 +96,16 @@ class QueryEngine_LlamaIndex implements INode { ), serviceContext: vectorStoreRetriever.serviceContext }) - return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') { const responseSynthesizer = new ResponseSynthesizer({ responseBuilder: new SimpleResponseBuilder(vectorStoreRetriever.serviceContext), serviceContext: vectorStoreRetriever.serviceContext }) - return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) } } - const queryEngine = new RetrieverQueryEngine(vectorStoreRetriever) - let text = '' let sourceDocuments: ICommonObject[] = [] let sourceNodes: BaseNode[] = [] From 21c47d8049c920631c1c53fb057b696fee319fca Mon Sep 17 00:00:00 2001 From: Henry Date: Sun, 28 Jan 2024 23:46:55 +0000 Subject: [PATCH 05/24] add subquery engine --- .../ChatOpenAI/ChatOpenAI_LlamaIndex.ts | 8 + .../OpenAIEmbedding_LlamaIndex.ts | 25 ++- .../SubQuestionQueryEngine.ts | 193 ++++++++++++++++++ .../SubQuestionQueryEngine/subQueryEngine.svg | 1 + .../tools/QueryEngineTool/QueryEngineTool.ts | 68 ++++++ .../tools/QueryEngineTool/queryEngineTool.svg | 1 + .../Pinecone/Pinecone_LlamaIndex.ts | 37 +++- .../vectorstores/SimpleStore/SimpleStore.ts | 27 
++- .../chatflows/Context Chat Engine.json | 64 +++++- .../marketplaces/chatflows/Query Engine.json | 48 ++++- packages/server/src/utils/index.ts | 2 +- 11 files changed, 453 insertions(+), 21 deletions(-) create mode 100644 packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts create mode 100644 packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg create mode 100644 packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts create mode 100644 packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts index 58b40823e..8b3567a6c 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts @@ -41,6 +41,14 @@ class ChatOpenAI_LlamaIndex_LLMs implements INode { label: 'gpt-4', name: 'gpt-4' }, + { + label: 'gpt-4-turbo-preview', + name: 'gpt-4-turbo-preview' + }, + { + label: 'gpt-4-0125-preview', + name: 'gpt-4-0125-preview' + }, { label: 'gpt-4-1106-preview', name: 'gpt-4-1106-preview' diff --git a/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts index dfd6bbf52..960197fe2 100644 --- a/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts +++ b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts @@ -32,6 +32,27 @@ class OpenAIEmbedding_LlamaIndex_Embeddings implements INode { credentialNames: ['openAIApi'] } this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'text-embedding-3-large', + name: 'text-embedding-3-large' + }, + { + label: 'text-embedding-3-small', + name: 'text-embedding-3-small' + }, + { + label: 'text-embedding-ada-002', + name: 'text-embedding-ada-002' + } + ], + default: 'text-embedding-ada-002', + optional: true + }, { label: 'Timeout', name: 'timeout', @@ -51,12 +72,14 @@ class OpenAIEmbedding_LlamaIndex_Embeddings implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { const timeout = nodeData.inputs?.timeout as string + const modelName = nodeData.inputs?.modelName as string const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) const obj: Partial = { - apiKey: openAIApiKey + apiKey: openAIApiKey, + model: modelName } if (timeout) obj.timeout = parseInt(timeout, 10) diff --git a/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts b/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts new file mode 100644 index 000000000..a872c0a23 --- /dev/null +++ b/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts @@ -0,0 +1,193 @@ +import { flatten } from 'lodash' +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { + TreeSummarize, + SimpleResponseBuilder, + Refine, + BaseEmbedding, + ResponseSynthesizer, + CompactAndRefine, + QueryEngineTool, + LLMQuestionGenerator, + SubQuestionQueryEngine, + BaseNode, + Metadata, + serviceContextFromDefaults +} from 'llamaindex' +import { reformatSourceDocuments } from '../EngineUtils' + +class SubQuestionQueryEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + sessionId?: string + + constructor(fields?: { sessionId?: string }) { + this.label = 'Sub Question Query Engine' + this.name = 'subQuestionQueryEngine' + this.version = 1.0 + this.type = 'SubQuestionQueryEngine' + this.icon = 'subQueryEngine.svg' + this.category = 'Engine' + this.description = + 'Breaks complex query into sub questions for each relevant data source, then gather all the intermediate reponses and synthesizes a final response' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'QueryEngine Tools', + name: 'queryEngineTools', + type: 'QueryEngineTool', + list: true + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'BaseEmbedding_LlamaIndex' + }, + { + label: 'Response Synthesizer', + name: 'responseSynthesizer', + type: 'ResponseSynthesizer', + description: + 'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See
more', + optional: true + }, + { + label: 'Return Source Documents', + name: 'returnSourceDocuments', + type: 'boolean', + optional: true + } + ] + this.sessionId = fields?.sessionId + } + + async init(): Promise { + return null + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + + const serviceContext = serviceContextFromDefaults({ + llm: model, + embedModel: embeddings + }) + + let queryEngineTools = nodeData.inputs?.queryEngineTools as QueryEngineTool[] + queryEngineTools = flatten(queryEngineTools) + + let queryEngine = SubQuestionQueryEngine.fromDefaults({ + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + + const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer + if (responseSynthesizerObj) { + if (responseSynthesizerObj.type === 'TreeSummarize') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new TreeSummarize(serviceContext, responseSynthesizerObj.textQAPromptTemplate), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if (responseSynthesizerObj.type === 'CompactAndRefine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new CompactAndRefine( + serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if (responseSynthesizerObj.type === 'Refine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new Refine( + serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new SimpleResponseBuilder(serviceContext), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } + } + + let text = '' + let sourceDocuments: ICommonObject[] = [] + let sourceNodes: BaseNode[] = [] + let isStreamingStarted = false + const isStreamingEnabled = options.socketIO && options.socketIOClientId + + if (isStreamingEnabled) { + const stream = await queryEngine.query({ query: input, stream: true }) + for await (const chunk of stream) { + text += chunk.response + if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes + if (!isStreamingStarted) { + isStreamingStarted = true + options.socketIO.to(options.socketIOClientId).emit('start', chunk.response) + } + + options.socketIO.to(options.socketIOClientId).emit('token', chunk.response) + } + + if (returnSourceDocuments) { + sourceDocuments = reformatSourceDocuments(sourceNodes) + options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', 
sourceDocuments) + } + } else { + const response = await queryEngine.query({ query: input }) + text = response?.response + sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? []) + } + + if (returnSourceDocuments) return { text, sourceDocuments } + else return { text } + } +} + +module.exports = { nodeClass: SubQuestionQueryEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg b/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg new file mode 100644 index 000000000..b94c20b5e --- /dev/null +++ b/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts b/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts new file mode 100644 index 000000000..163eff766 --- /dev/null +++ b/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts @@ -0,0 +1,68 @@ +import { INode, INodeData, INodeParams } from '../../../src/Interface' +import { VectorStoreIndex } from 'llamaindex' + +class QueryEngine_Tools implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + tags: string[] + baseClasses: string[] + inputs?: INodeParams[] + + constructor() { + this.label = 'QueryEngine Tool' + this.name = 'queryEngineToolLlamaIndex' + this.version = 1.0 + this.type = 'QueryEngineTool' + this.icon = 'queryEngineTool.svg' + this.category = 'Tools' + this.tags = ['LlamaIndex'] + this.description = 'Tool used to invoke query engine' + this.baseClasses = [this.type] + this.inputs = [ + { + label: 'Vector Store Index', + name: 'vectorStoreIndex', + type: 'VectorStoreIndex' + }, + { + label: 'Tool Name', + name: 'toolName', + type: 'string', + description: 'Tool name must be small capital letter with underscore. 
Ex: my_tool' + }, + { + label: 'Tool Description', + name: 'toolDesc', + type: 'string', + rows: 4 + } + ] + } + + async init(nodeData: INodeData): Promise { + const vectorStoreIndex = nodeData.inputs?.vectorStoreIndex as VectorStoreIndex + const toolName = nodeData.inputs?.toolName as string + const toolDesc = nodeData.inputs?.toolDesc as string + const queryEngineTool = { + queryEngine: vectorStoreIndex.asQueryEngine({ + preFilters: { + ...(vectorStoreIndex as any).metadatafilter + } + }), + metadata: { + name: toolName, + description: toolDesc + }, + vectorStoreIndex + } + + return queryEngineTool + } +} + +module.exports = { nodeClass: QueryEngine_Tools } diff --git a/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg b/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg new file mode 100644 index 000000000..d49d8375c --- /dev/null +++ b/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts index a584fedeb..c0b2e5c11 100644 --- a/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts +++ b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts @@ -13,7 +13,7 @@ import { import { FetchResponse, Index, Pinecone, ScoredPineconeRecord } from '@pinecone-database/pinecone' import { flatten } from 'lodash' import { Document as LCDocument } from 'langchain/document' -import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' import { flattenObject, getCredentialData, getCredentialParam } from '../../../src/utils' class PineconeLlamaIndex_VectorStores implements INode { @@ -28,6 +28,7 @@ class PineconeLlamaIndex_VectorStores implements INode { baseClasses: string[] inputs: INodeParams[] credential: INodeParams + outputs: INodeOutputsValue[] constructor() { this.label = 'Pinecone' @@ -93,6 +94,18 @@ class PineconeLlamaIndex_VectorStores implements INode { optional: true } ] + this.outputs = [ + { + label: 'Pinecone Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'Pinecone Vector Store Index', + name: 'vectorStore', + baseClasses: [this.type, 'VectorStoreIndex'] + } + ] } //@ts-ignore @@ -155,8 +168,10 @@ class PineconeLlamaIndex_VectorStores implements INode { } if (pineconeNamespace) obj.namespace = pineconeNamespace + + let metadatafilter = {} if (pineconeMetadataFilter) { - const metadatafilter = typeof pineconeMetadataFilter === 'object' ? pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter) + metadatafilter = typeof pineconeMetadataFilter === 'object' ? 
pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter) obj.queryFilter = metadatafilter } @@ -171,11 +186,21 @@ class PineconeLlamaIndex_VectorStores implements INode { serviceContext }) - const retriever = index.asRetriever() - retriever.similarityTopK = k - ;(retriever as any).serviceContext = serviceContext + const output = nodeData.outputs?.output as string - return retriever + if (output === 'retriever') { + const retriever = index.asRetriever() + retriever.similarityTopK = k + ;(retriever as any).serviceContext = serviceContext + return retriever + } else if (output === 'vectorStore') { + ;(index as any).k = k + if (metadatafilter) { + ;(index as any).metadatafilter = metadatafilter + } + return index + } + return index } } diff --git a/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts index eeef6f693..36c383e98 100644 --- a/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts +++ b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts @@ -63,6 +63,18 @@ class SimpleStoreUpsert_LlamaIndex_VectorStores implements INode { optional: true } ] + this.outputs = [ + { + label: 'SimpleStore Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'SimpleStore Vector Store Index', + name: 'vectorStore', + baseClasses: [this.type, 'VectorStoreIndex'] + } + ] } //@ts-ignore @@ -114,10 +126,19 @@ class SimpleStoreUpsert_LlamaIndex_VectorStores implements INode { const storageContext = await storageContextFromDefaults({ persistDir: filePath }) const index = await VectorStoreIndex.init({ storageContext, serviceContext }) - const retriever = index.asRetriever() - retriever.similarityTopK = k - return retriever + const output = nodeData.outputs?.output as string + + if (output === 'retriever') { + const retriever = index.asRetriever() + retriever.similarityTopK = k + ;(retriever as any).serviceContext = serviceContext + return retriever + } else if (output === 'vectorStore') { + ;(index as any).k = k + return index + } + return index } } diff --git a/packages/server/marketplaces/chatflows/Context Chat Engine.json b/packages/server/marketplaces/chatflows/Context Chat Engine.json index 7608a550c..475c6b3a5 100644 --- a/packages/server/marketplaces/chatflows/Context Chat Engine.json +++ b/packages/server/marketplaces/chatflows/Context Chat Engine.json @@ -181,6 +181,28 @@ "credentialNames": ["openAIApi"], "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbedding_LlamaIndex_0-input-modelName-options" + }, { "label": "Timeout", "name": "timeout", @@ -315,13 +337,29 @@ }, "outputAnchors": [ { - "id": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", - "name": "pineconeLlamaIndex", - "label": "Pinecone", - "type": "Pinecone | VectorIndexRetriever" + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": 
"pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" } ], - "outputs": {}, + "outputs": { + "output": "retriever" + }, "selected": false }, "selected": false, @@ -367,6 +405,14 @@ "label": "gpt-4", "name": "gpt-4" }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, { "label": "gpt-4-1106-preview", "name": "gpt-4-1106-preview" @@ -672,6 +718,14 @@ "label": "gpt-4", "name": "gpt-4" }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, { "label": "gpt-4-1106-preview", "name": "gpt-4-1106-preview" diff --git a/packages/server/marketplaces/chatflows/Query Engine.json b/packages/server/marketplaces/chatflows/Query Engine.json index 625097cc4..825533339 100644 --- a/packages/server/marketplaces/chatflows/Query Engine.json +++ b/packages/server/marketplaces/chatflows/Query Engine.json @@ -163,13 +163,29 @@ }, "outputAnchors": [ { - "id": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", - "name": "pineconeLlamaIndex", - "label": "Pinecone", - "type": "Pinecone | VectorIndexRetriever" + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" } ], - "outputs": {}, + "outputs": { + "output": "retriever" + }, "selected": false }, "selected": false, @@ -206,6 +222,28 @@ "credentialNames": ["openAIApi"], "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbedding_LlamaIndex_0-input-modelName-options" + }, { "label": "Timeout", "name": "timeout", diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index d00633432..9a14429da 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -844,7 +844,7 @@ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNod const whitelistAgents = ['openAIFunctionAgent', 'csvAgent', 'airtableAgent', 'conversationalRetrievalAgent'] isValidChainOrAgent = whitelistAgents.includes(endingNodeData.name) } else if (endingNodeData.category === 'Engine') { - const whitelistEngine = ['contextChatEngine', 'simpleChatEngine', 'queryEngine'] + const whitelistEngine = ['contextChatEngine', 'simpleChatEngine', 'queryEngine', 'subQuestionQueryEngine'] isValidChainOrAgent = whitelistEngine.includes(endingNodeData.name) } From 1b69ebdb93ac514a31e9a82a9d9eb7aca66233d8 Mon Sep 17 00:00:00 2001 From: niztal Date: Mon, 29 Jan 2024 23:30:38 +0200 Subject: [PATCH 06/24] mysql-ssl --- 
packages/server/src/DataSource.ts | 32 +++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/packages/server/src/DataSource.ts b/packages/server/src/DataSource.ts index 83a7fa2c9..bd7e8dd2a 100644 --- a/packages/server/src/DataSource.ts +++ b/packages/server/src/DataSource.ts @@ -40,7 +40,19 @@ export const init = async (): Promise => { synchronize: false, migrationsRun: false, entities: Object.values(entities), - migrations: mysqlMigrations + migrations: mysqlMigrations, + ...(process.env.DATABASE_SSL_KEY_BASE64 + ? { + ssl: { + rejectUnauthorized: false, + ca: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') + } + } + : process.env.DATABASE_SSL === 'true' + ? { + ssl: true + } + : {}), }) break case 'postgres': @@ -53,16 +65,16 @@ export const init = async (): Promise => { database: process.env.DATABASE_NAME, ...(process.env.DATABASE_SSL_KEY_BASE64 ? { - ssl: { - rejectUnauthorized: false, - cert: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') - } - } + ssl: { + rejectUnauthorized: false, + cert: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') + } + } : process.env.DATABASE_SSL === 'true' - ? { - ssl: true - } - : {}), + ? { + ssl: true + } + : {}), synchronize: false, migrationsRun: false, entities: Object.values(entities), From 289b04fb120ebbb0e9b61448d073025a618840ca Mon Sep 17 00:00:00 2001 From: niztal Date: Tue, 30 Jan 2024 00:50:27 +0200 Subject: [PATCH 07/24] fix lint --- packages/server/src/DataSource.ts | 36 +++++++++++++++---------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/packages/server/src/DataSource.ts b/packages/server/src/DataSource.ts index bd7e8dd2a..222dae5b0 100644 --- a/packages/server/src/DataSource.ts +++ b/packages/server/src/DataSource.ts @@ -43,16 +43,16 @@ export const init = async (): Promise => { migrations: mysqlMigrations, ...(process.env.DATABASE_SSL_KEY_BASE64 ? { - ssl: { - rejectUnauthorized: false, - ca: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') - } - } + ssl: { + rejectUnauthorized: false, + ca: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') + } + } : process.env.DATABASE_SSL === 'true' - ? { - ssl: true - } - : {}), + ? { + ssl: true + } + : {}) }) break case 'postgres': @@ -65,16 +65,16 @@ export const init = async (): Promise => { database: process.env.DATABASE_NAME, ...(process.env.DATABASE_SSL_KEY_BASE64 ? { - ssl: { - rejectUnauthorized: false, - cert: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') - } - } + ssl: { + rejectUnauthorized: false, + cert: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') + } + } : process.env.DATABASE_SSL === 'true' - ? { - ssl: true - } - : {}), + ? { + ssl: true + } + : {}), synchronize: false, migrationsRun: false, entities: Object.values(entities), From 82e78d3e4de48f0bcfb5f883b36e9b32cc11545a Mon Sep 17 00:00:00 2001 From: niztal Date: Tue, 30 Jan 2024 22:06:12 +0200 Subject: [PATCH 08/24] refactor uninfy pg and mysql to use the same SSL config function --- packages/server/src/DataSource.ts | 38 ++++++++++++------------------- 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/packages/server/src/DataSource.ts b/packages/server/src/DataSource.ts index 222dae5b0..861bd83b6 100644 --- a/packages/server/src/DataSource.ts +++ b/packages/server/src/DataSource.ts @@ -41,18 +41,7 @@ export const init = async (): Promise => { migrationsRun: false, entities: Object.values(entities), migrations: mysqlMigrations, - ...(process.env.DATABASE_SSL_KEY_BASE64 - ? 
{ - ssl: { - rejectUnauthorized: false, - ca: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') - } - } - : process.env.DATABASE_SSL === 'true' - ? { - ssl: true - } - : {}) + ssl: getDatabaseSSLFromEnv(), }) break case 'postgres': @@ -63,18 +52,7 @@ export const init = async (): Promise => { username: process.env.DATABASE_USER, password: process.env.DATABASE_PASSWORD, database: process.env.DATABASE_NAME, - ...(process.env.DATABASE_SSL_KEY_BASE64 - ? { - ssl: { - rejectUnauthorized: false, - cert: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') - } - } - : process.env.DATABASE_SSL === 'true' - ? { - ssl: true - } - : {}), + ssl: getDatabaseSSLFromEnv(), synchronize: false, migrationsRun: false, entities: Object.values(entities), @@ -101,3 +79,15 @@ export function getDataSource(): DataSource { } return appDataSource } + +const getDatabaseSSLFromEnv = () => { + if (process.env.DATABASE_SSL_KEY_BASE64) { + return { + rejectUnauthorized: false, + ca: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') + }; + } else if (process.env.DATABASE_SSL === 'true') { + return true; + } + return {}; +} From a382e230f4817d2efce896a04c60656687cf8acd Mon Sep 17 00:00:00 2001 From: niztal Date: Tue, 30 Jan 2024 22:07:56 +0200 Subject: [PATCH 09/24] fix lint issues --- packages/server/src/DataSource.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/server/src/DataSource.ts b/packages/server/src/DataSource.ts index 861bd83b6..f563dce05 100644 --- a/packages/server/src/DataSource.ts +++ b/packages/server/src/DataSource.ts @@ -41,7 +41,7 @@ export const init = async (): Promise => { migrationsRun: false, entities: Object.values(entities), migrations: mysqlMigrations, - ssl: getDatabaseSSLFromEnv(), + ssl: getDatabaseSSLFromEnv() }) break case 'postgres': @@ -85,9 +85,9 @@ const getDatabaseSSLFromEnv = () => { return { rejectUnauthorized: false, ca: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') - }; + } } else if (process.env.DATABASE_SSL === 'true') { - return true; + return true } - return {}; + return {} } From 4107118673136023d3a644dfff75fb85d12260ff Mon Sep 17 00:00:00 2001 From: niztal Date: Tue, 30 Jan 2024 23:44:42 +0200 Subject: [PATCH 10/24] avoid BWC PGSQLMODE returning empty ssl object --- packages/server/src/DataSource.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/server/src/DataSource.ts b/packages/server/src/DataSource.ts index f563dce05..483c070ee 100644 --- a/packages/server/src/DataSource.ts +++ b/packages/server/src/DataSource.ts @@ -89,5 +89,5 @@ const getDatabaseSSLFromEnv = () => { } else if (process.env.DATABASE_SSL === 'true') { return true } - return {} + return undefined } From 9ab8c36fd0adfaa9678914d219dadef404b709f9 Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 31 Jan 2024 13:33:27 +0000 Subject: [PATCH 11/24] =?UTF-8?q?=F0=9F=A5=B3=20flowise-components@1.5.3?= =?UTF-8?q?=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- packages/components/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/components/package.json b/packages/components/package.json index a9d57a10a..7a7d2e758 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -1,6 +1,6 @@ { "name": "flowise-components", - "version": "1.5.2", + "version": "1.5.3", "description": "Flowiseai Components", "main": "dist/src/index", "types": "dist/src/index.d.ts", From 
68bc3c708f518693cbc86a5232cdedbc0dd5a783 Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 31 Jan 2024 13:34:28 +0000 Subject: [PATCH 12/24] =?UTF-8?q?=F0=9F=A5=B3=20flowise-ui@1.4.9=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- packages/ui/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ui/package.json b/packages/ui/package.json index 32c20aedb..68d78c953 100644 --- a/packages/ui/package.json +++ b/packages/ui/package.json @@ -1,6 +1,6 @@ { "name": "flowise-ui", - "version": "1.4.8", + "version": "1.4.9", "license": "SEE LICENSE IN LICENSE.md", "homepage": "https://flowiseai.com", "author": { From c98ef7a8b13192809941b48d00d3763003862dc0 Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 31 Jan 2024 13:35:00 +0000 Subject: [PATCH 13/24] =?UTF-8?q?=F0=9F=A5=B3=20flowise@1.4.12=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- package.json | 2 +- packages/server/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 314408489..451f78551 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "1.4.11", + "version": "1.4.12", "private": true, "homepage": "https://flowiseai.com", "workspaces": [ diff --git a/packages/server/package.json b/packages/server/package.json index 0955448df..c7ed13ac2 100644 --- a/packages/server/package.json +++ b/packages/server/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "1.4.11", + "version": "1.4.12", "description": "Flowiseai Server", "main": "dist/index", "types": "dist/index.d.ts", From 8d62adec2fbb8449ce650b0489e5ba00c7101326 Mon Sep 17 00:00:00 2001 From: melon Date: Thu, 1 Feb 2024 14:19:39 +0800 Subject: [PATCH 14/24] Refactor session ID assignment in App class --- packages/server/src/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 045e40dd7..d58660f0b 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1440,7 +1440,7 @@ export class App { chatType, chatId, memoryType: memoryType ?? (chatId ? IsNull() : undefined), - sessionId: sessionId ?? (chatId ? IsNull() : undefined), + sessionId: sessionId ?? undefined, createdDate: toDate && fromDate ? 
Between(fromDate, toDate) : undefined }, order: { From 5c6b5b233c28784161048c3ddc5c32368f2a330a Mon Sep 17 00:00:00 2001 From: Henry Date: Thu, 1 Feb 2024 13:14:02 +0000 Subject: [PATCH 15/24] fix top K --- .../nodes/retrievers/CohereRerankRetriever/CohereRerank.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerank.ts b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerank.ts index f74b83655..f12d10260 100644 --- a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerank.ts +++ b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerank.ts @@ -47,7 +47,7 @@ export class CohereRerank extends BaseDocumentCompressor { doc.metadata.relevance_score = result.relevance_score finalResults.push(doc) }) - return finalResults + return finalResults.splice(0, this.k) } catch (error) { return documents } From 7881f295ab0391692abc5255d8dbb70c2f50c9ea Mon Sep 17 00:00:00 2001 From: Henry Date: Thu, 1 Feb 2024 19:18:14 +0000 Subject: [PATCH 16/24] update vec 2 doc node --- .../VectorStoreToDocument/VectorStoreToDocument.ts | 2 +- .../chatflows/Prompt Chaining with VectorStore.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts b/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts index becd0ac61..c087e000c 100644 --- a/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts +++ b/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts @@ -51,7 +51,7 @@ class VectorStoreToDocument_DocumentLoaders implements INode { { label: 'Document', name: 'document', - baseClasses: this.baseClasses + baseClasses: [...this.baseClasses, 'json'] }, { label: 'Text', diff --git a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json index 46e1257d2..c2060e79b 100644 --- a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json +++ b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json @@ -190,10 +190,10 @@ "type": "options", "options": [ { - "id": "vectorStoreToDocument_0-output-document-Document", + "id": "vectorStoreToDocument_0-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "vectorStoreToDocument_0-output-text-string|json", From 1522acbf5aa6cf53c1721ac66c21343c4aa718a3 Mon Sep 17 00:00:00 2001 From: Rafael Reis Date: Thu, 1 Feb 2024 16:19:30 -0300 Subject: [PATCH 17/24] Add gpt-3.5-turbo-0125 to ChatOpenAI.ts --- packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts index 2cb701c18..a0d0bd6ea 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts @@ -79,6 +79,10 @@ class ChatOpenAI_ChatModels implements INode { label: 'gpt-3.5-turbo', name: 'gpt-3.5-turbo' }, + { + label: 'gpt-3.5-turbo-0125', + name: 'gpt-3.5-turbo-0125' + }, { label: 'gpt-3.5-turbo-1106', name: 'gpt-3.5-turbo-1106' From 6013743705cbc98ef4ce819b013f9314add66a16 Mon Sep 17 00:00:00 2001 From: Henry Date: Sat, 3 
Feb 2024 02:14:43 +0000 Subject: [PATCH 18/24] add subquestion query engine marketplace template --- .../chatflows/SubQuestion Query Engine.json | 1201 +++++++++++++++++ 1 file changed, 1201 insertions(+) create mode 100644 packages/server/marketplaces/chatflows/SubQuestion Query Engine.json diff --git a/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json b/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json new file mode 100644 index 000000000..f14607daa --- /dev/null +++ b/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json @@ -0,0 +1,1201 @@ +{ + "description": "Breaks down query into sub questions for each relevant data source, then combine into final response", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 749, + "id": "compactrefineLlamaIndex_0", + "position": { + "x": -1214.7329938486841, + "y": 56.52482754447425 + }, + "type": "customNode", + "data": { + "id": "compactrefineLlamaIndex_0", + "label": "Compact and Refine", + "version": 1, + "name": "compactrefineLlamaIndex", + "type": "CompactRefine", + "baseClasses": ["CompactRefine", "ResponseSynthesizer"], + "tags": ["LlamaIndex"], + "category": "Response Synthesizer", + "description": "CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.", + "inputParams": [ + { + "label": "Refine Prompt", + "name": "refinePrompt", + "type": "string", + "rows": 4, + "default": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:", + "warning": "Prompt can contains no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-refinePrompt-string" + }, + { + "label": "Text QA Prompt", + "name": "textQAPrompt", + "type": "string", + "rows": 4, + "default": "Context information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:", + "warning": "Prompt can contains no variables, or up to 2 variables. Variables must be {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-textQAPrompt-string" + } + ], + "inputAnchors": [], + "inputs": { + "refinePrompt": "A user has selected a set of SEC filing documents and has asked a question about them.\nThe SEC documents have the following titles:\n- Apple Inc (APPL) FORM 10K 2022\n- Tesla Inc (TSLA) FORM 10K 2022\nThe original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. 
If the context isn't useful, return the original answer.\nRefined Answer:", + "textQAPrompt": "A user has selected a set of SEC filing documents and has asked a question about them.\nThe SEC documents have the following titles:\n- Apple Inc (APPL) FORM 10K 2022\n- Tesla Inc (TSLA) FORM 10K 2022\nContext information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:" + }, + "outputAnchors": [ + { + "id": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "name": "compactrefineLlamaIndex", + "label": "CompactRefine", + "type": "CompactRefine | ResponseSynthesizer" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -1214.7329938486841, + "y": 56.52482754447425 + }, + "dragging": false + }, + { + "width": 300, + "height": 611, + "id": "pineconeLlamaIndex_0", + "position": { + "x": 37.23548045607484, + "y": -119.7364648743818 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_0", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. 
Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": [], + "model": "{{chatOpenAI_LlamaIndex_0.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "flowiseindex", + "pineconeNamespace": "pinecone-form10k", + "pineconeMetadataFilter": "{\"source\":\"tesla\"}", + "topK": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_0-output-vectorStore-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "vectorStore" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 37.23548045607484, + "y": -119.7364648743818 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_0", + "position": { + "x": -455.232655468177, + "y": -711.0080711676725 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_0", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex", "BaseLLM"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": 
"chatOpenAI_LlamaIndex_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_0-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_0-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": "0", + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex | BaseLLM" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -455.232655468177, + "y": -711.0080711676725 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_0", + "position": { + "x": -451.0082548287243, + "y": -127.15143353229783 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_0", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -451.0082548287243, + "y": -127.15143353229783 + } + }, + { + "width": 300, + "height": 511, + "id": "queryEngineToolLlamaIndex_0", + "position": { + "x": 460.37559236135905, + "y": -565.6224030941121 + }, + "type": "customNode", + "data": { + "id": "queryEngineToolLlamaIndex_0", + "label": "QueryEngine Tool", + "version": 1, + "name": "queryEngineToolLlamaIndex", + "type": "QueryEngineTool", + "baseClasses": ["QueryEngineTool"], + "tags": ["LlamaIndex"], + "category": "Tools", + "description": "Execute actions using ChatGPT Plugin Url", + "inputParams": [ + { + "label": "Tool Name", + "name": "toolName", + "type": "string", + "description": "Tool name must be small capital letter with underscore. 
Ex: my_tool", + "id": "queryEngineToolLlamaIndex_0-input-toolName-string" + }, + { + "label": "Tool Description", + "name": "toolDesc", + "type": "string", + "rows": 4, + "id": "queryEngineToolLlamaIndex_0-input-toolDesc-string" + } + ], + "inputAnchors": [ + { + "label": "Vector Store Index", + "name": "vectorStoreIndex", + "type": "VectorStoreIndex", + "id": "queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex" + } + ], + "inputs": { + "vectorStoreIndex": "{{pineconeLlamaIndex_1.data.instance}}", + "toolName": "apple_tool", + "toolDesc": "A SEC Form 10K filing describing the financials of Apple Inc (APPL) for the 2022 time period." + }, + "outputAnchors": [ + { + "id": "queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool", + "name": "queryEngineToolLlamaIndex", + "label": "QueryEngineTool", + "type": "QueryEngineTool" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 460.37559236135905, + "y": -565.6224030941121 + }, + "dragging": false + }, + { + "width": 300, + "height": 611, + "id": "pineconeLlamaIndex_1", + "position": { + "x": 42.17855025460784, + "y": -839.8824444107056 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_1", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_1-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_1-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_1-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_1-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. 
Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_1-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_1-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_1-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_1-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": [], + "model": "{{chatOpenAI_LlamaIndex_0.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "flowiseindex", + "pineconeNamespace": "pinecone-form10k", + "pineconeMetadataFilter": "{\"source\":\"apple\"}", + "topK": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_1-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_1-output-vectorStore-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "vectorStore" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 42.17855025460784, + "y": -839.8824444107056 + }, + "dragging": false + }, + { + "width": 300, + "height": 511, + "id": "queryEngineToolLlamaIndex_1", + "position": { + "x": 462.16721384216123, + "y": -17.750065363429798 + }, + "type": "customNode", + "data": { + "id": "queryEngineToolLlamaIndex_1", + "label": "QueryEngine Tool", + "version": 1, + "name": "queryEngineToolLlamaIndex", + "type": "QueryEngineTool", + "baseClasses": ["QueryEngineTool"], + "tags": ["LlamaIndex"], + "category": "Tools", + "description": "Execute actions using ChatGPT Plugin Url", + "inputParams": [ + { + "label": "Tool Name", + "name": "toolName", + "type": "string", + "description": "Tool name must be small capital letter with underscore. Ex: my_tool", + "id": "queryEngineToolLlamaIndex_1-input-toolName-string" + }, + { + "label": "Tool Description", + "name": "toolDesc", + "type": "string", + "rows": 4, + "id": "queryEngineToolLlamaIndex_1-input-toolDesc-string" + } + ], + "inputAnchors": [ + { + "label": "Vector Store Index", + "name": "vectorStoreIndex", + "type": "VectorStoreIndex", + "id": "queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex" + } + ], + "inputs": { + "vectorStoreIndex": "{{pineconeLlamaIndex_0.data.instance}}", + "toolName": "tesla_tool", + "toolDesc": "A SEC Form 10K filing describing the financials of Tesla Inc (TSLA) for the 2022 time period." 
+ }, + "outputAnchors": [ + { + "id": "queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool", + "name": "queryEngineToolLlamaIndex", + "label": "QueryEngineTool", + "type": "QueryEngineTool" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 462.16721384216123, + "y": -17.750065363429798 + }, + "dragging": false + }, + { + "width": 300, + "height": 484, + "id": "subQuestionQueryEngine_0", + "position": { + "x": 982.7583030231563, + "y": 349.50858200305896 + }, + "type": "customNode", + "data": { + "id": "subQuestionQueryEngine_0", + "label": "Sub Question Query Engine", + "version": 1, + "name": "subQuestionQueryEngine", + "type": "SubQuestionQueryEngine", + "baseClasses": ["SubQuestionQueryEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Simple query engine built to answer question over your data, without memory", + "inputParams": [ + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "subQuestionQueryEngine_0-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "QueryEngine Tools", + "name": "queryEngineTools", + "type": "QueryEngineTool", + "list": true, + "id": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "subQuestionQueryEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "subQuestionQueryEngine_0-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + "label": "Response Synthesizer", + "name": "responseSynthesizer", + "type": "ResponseSynthesizer", + "description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. 
See more", + "optional": true, + "id": "subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer" + } + ], + "inputs": { + "queryEngineTools": ["{{queryEngineToolLlamaIndex_1.data.instance}}", "{{queryEngineToolLlamaIndex_0.data.instance}}"], + "model": "{{chatOpenAI_LlamaIndex_1.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_1.data.instance}}", + "responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "subQuestionQueryEngine_0-output-subQuestionQueryEngine-SubQuestionQueryEngine", + "name": "subQuestionQueryEngine", + "label": "SubQuestionQueryEngine", + "type": "SubQuestionQueryEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 982.7583030231563, + "y": 349.50858200305896 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_1", + "position": { + "x": -846.9087470244615, + "y": 23.446501495097493 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_1", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex", "BaseLLM"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": "0", + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": 
"chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex | BaseLLM" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -846.9087470244615, + "y": 23.446501495097493 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_1", + "position": { + "x": -437.3136244622061, + "y": 329.99986619821175 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_1", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_1-input-credential-credential" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_1-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_1-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_1-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -437.3136244622061, + "y": 329.99986619821175 + } + }, + { + "width": 300, + "height": 82, + "id": "stickyNote_0", + "position": { + "x": 35.90892935132143, + "y": -936.1282632923861 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": ["StickyNote"], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_0-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Query previously upserted documents with corresponding metadata key value pair - \n{ source: \"apple\"}" + }, + "outputAnchors": [ + { + "id": "stickyNote_0-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 35.90892935132143, + "y": -936.1282632923861 + }, + "dragging": false + }, + { + "width": 300, + "height": 82, + "id": "stickyNote_1", + "position": { + "x": 37.74909394815296, + "y": -215.17456133022054 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_1", + "label": "Sticky Note", + "version": 1, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": ["StickyNote"], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + 
"name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_1-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Query previously upserted documents with corresponding metadata key value pair - \n{ source: \"tesla\"}" + }, + "outputAnchors": [ + { + "id": "stickyNote_1-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 37.74909394815296, + "y": -215.17456133022054 + }, + "dragging": false + }, + { + "width": 300, + "height": 163, + "id": "stickyNote_2", + "position": { + "x": 984.9543031068163, + "y": 171.04264459503852 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_2", + "label": "Sticky Note", + "version": 1, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": ["StickyNote"], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_2-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Break questions into subqueries, then retrieve corresponding context using queryengine tool.\n\nThis implementation does not contains memory, we can use OpenAI Agent to function call this flow" + }, + "outputAnchors": [ + { + "id": "stickyNote_2-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 984.9543031068163, + "y": 171.04264459503852 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "chatOpenAI_LlamaIndex_0", + "sourceHandle": "chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "target": "pineconeLlamaIndex_1", + "targetHandle": "pineconeLlamaIndex_1-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_0-chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM-pineconeLlamaIndex_1-pineconeLlamaIndex_1-input-model-BaseChatModel_LlamaIndex" + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_1", + "targetHandle": "pineconeLlamaIndex_1-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_1-pineconeLlamaIndex_1-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + "source": "pineconeLlamaIndex_1", + "sourceHandle": "pineconeLlamaIndex_1-output-vectorStore-Pinecone|VectorStoreIndex", + "target": "queryEngineToolLlamaIndex_0", + "targetHandle": "queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex", + "type": "buttonedge", + "id": "pineconeLlamaIndex_1-pineconeLlamaIndex_1-output-vectorStore-Pinecone|VectorStoreIndex-queryEngineToolLlamaIndex_0-queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex" + }, + { + "source": "pineconeLlamaIndex_0", + "sourceHandle": "pineconeLlamaIndex_0-output-vectorStore-Pinecone|VectorStoreIndex", + "target": 
"queryEngineToolLlamaIndex_1", + "targetHandle": "queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex", + "type": "buttonedge", + "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-vectorStore-Pinecone|VectorStoreIndex-queryEngineToolLlamaIndex_1-queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex" + }, + { + "source": "queryEngineToolLlamaIndex_1", + "sourceHandle": "queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool", + "type": "buttonedge", + "id": "queryEngineToolLlamaIndex_1-queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool" + }, + { + "source": "queryEngineToolLlamaIndex_0", + "sourceHandle": "queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool", + "type": "buttonedge", + "id": "queryEngineToolLlamaIndex_0-queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool" + }, + { + "source": "chatOpenAI_LlamaIndex_1", + "sourceHandle": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_1-chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "source": "openAIEmbedding_LlamaIndex_1", + "sourceHandle": "openAIEmbedding_LlamaIndex_1-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_1-openAIEmbedding_LlamaIndex_1-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + "source": "compactrefineLlamaIndex_0", + "sourceHandle": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer", + "type": "buttonedge", + "id": "compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer" + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": 
"openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + "source": "chatOpenAI_LlamaIndex_0", + "sourceHandle": "chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_0-chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + } + ] +} From 011a0a75c332257278c8ae5d8b42d5a596f3060f Mon Sep 17 00:00:00 2001 From: Ilyes Tascou Date: Mon, 5 Feb 2024 17:20:05 +0100 Subject: [PATCH 19/24] add kotlin files to folder-loader --- packages/components/nodes/documentloaders/Folder/Folder.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/components/nodes/documentloaders/Folder/Folder.ts b/packages/components/nodes/documentloaders/Folder/Folder.ts index f5d0c6402..f8346e3cb 100644 --- a/packages/components/nodes/documentloaders/Folder/Folder.ts +++ b/packages/components/nodes/documentloaders/Folder/Folder.ts @@ -70,6 +70,7 @@ class Folder_DocumentLoaders implements INode { '.css': (path) => new TextLoader(path), '.go': (path) => new TextLoader(path), // Go '.h': (path) => new TextLoader(path), // C++ Header files + '.kt': (path) => new TextLoader(path), // Kotlin '.java': (path) => new TextLoader(path), // Java '.js': (path) => new TextLoader(path), // JavaScript '.less': (path) => new TextLoader(path), // Less files From dcacb02a4758b07d161d9a14c3fbae2a32e871aa Mon Sep 17 00:00:00 2001 From: Henry Date: Tue, 6 Feb 2024 03:01:47 +0800 Subject: [PATCH 20/24] =?UTF-8?q?=F0=9F=A5=B3=20flowise-components@1.6.0?= =?UTF-8?q?=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- packages/components/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/components/package.json b/packages/components/package.json index bcb746b04..62ffba94f 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -1,6 +1,6 @@ { "name": "flowise-components", - "version": "1.5.3", + "version": "1.6.0", "description": "Flowiseai Components", "main": "dist/src/index", "types": "dist/src/index.d.ts", From 9189b7013127e6b533376ddad596e831a1cdb6eb Mon Sep 17 00:00:00 2001 From: Henry Date: Tue, 6 Feb 2024 03:02:16 +0800 Subject: [PATCH 21/24] =?UTF-8?q?=F0=9F=A5=B3=20flowise-ui@1.5.0=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- packages/ui/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ui/package.json b/packages/ui/package.json index 68d78c953..eb3bebda5 100644 --- a/packages/ui/package.json +++ b/packages/ui/package.json @@ -1,6 +1,6 @@ { "name": "flowise-ui", - "version": "1.4.9", + "version": "1.5.0", "license": "SEE LICENSE IN LICENSE.md", "homepage": "https://flowiseai.com", "author": { From 7faaf13ccc3705030a4dd87d05cd60e9f69772c3 Mon Sep 17 00:00:00 2001 From: Henry Date: Tue, 6 Feb 2024 03:02:52 +0800 Subject: [PATCH 22/24] =?UTF-8?q?=F0=9F=A5=B3=20flowise@1.5.0=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- package.json | 2 +- 
packages/server/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 451f78551..5f5f5812f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "1.4.12", + "version": "1.5.0", "private": true, "homepage": "https://flowiseai.com", "workspaces": [ diff --git a/packages/server/package.json b/packages/server/package.json index c7ed13ac2..0d7dea774 100644 --- a/packages/server/package.json +++ b/packages/server/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "1.4.12", + "version": "1.5.0", "description": "Flowiseai Server", "main": "dist/index", "types": "dist/index.d.ts", From 2bb2a7588a0172a937bde34aefce5173e24f34a7 Mon Sep 17 00:00:00 2001 From: Ilyes Tascou Date: Tue, 6 Feb 2024 14:25:40 +0100 Subject: [PATCH 23/24] add recursive option for folder-loader --- .../nodes/documentloaders/Folder/Folder.ts | 96 +++++++++++-------- 1 file changed, 54 insertions(+), 42 deletions(-) diff --git a/packages/components/nodes/documentloaders/Folder/Folder.ts b/packages/components/nodes/documentloaders/Folder/Folder.ts index f8346e3cb..ab7705629 100644 --- a/packages/components/nodes/documentloaders/Folder/Folder.ts +++ b/packages/components/nodes/documentloaders/Folder/Folder.ts @@ -34,6 +34,12 @@ class Folder_DocumentLoaders implements INode { type: 'string', placeholder: '' }, + { + label: 'Recursive', + name: 'recursive', + type: 'boolean', + additionalParams: false + }, { label: 'Text Splitter', name: 'textSplitter', @@ -54,49 +60,55 @@ class Folder_DocumentLoaders implements INode { const textSplitter = nodeData.inputs?.textSplitter as TextSplitter const folderPath = nodeData.inputs?.folderPath as string const metadata = nodeData.inputs?.metadata + const recursive = nodeData.inputs?.recursive as boolean - const loader = new DirectoryLoader(folderPath, { - '.json': (path) => new JSONLoader(path), - '.txt': (path) => new TextLoader(path), - '.csv': (path) => new CSVLoader(path), - '.docx': (path) => new DocxLoader(path), - // @ts-ignore - '.pdf': (path) => new PDFLoader(path, { pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') }), - '.aspx': (path) => new TextLoader(path), - '.asp': (path) => new TextLoader(path), - '.cpp': (path) => new TextLoader(path), // C++ - '.c': (path) => new TextLoader(path), - '.cs': (path) => new TextLoader(path), - '.css': (path) => new TextLoader(path), - '.go': (path) => new TextLoader(path), // Go - '.h': (path) => new TextLoader(path), // C++ Header files - '.kt': (path) => new TextLoader(path), // Kotlin - '.java': (path) => new TextLoader(path), // Java - '.js': (path) => new TextLoader(path), // JavaScript - '.less': (path) => new TextLoader(path), // Less files - '.ts': (path) => new TextLoader(path), // TypeScript - '.php': (path) => new TextLoader(path), // PHP - '.proto': (path) => new TextLoader(path), // Protocol Buffers - '.python': (path) => new TextLoader(path), // Python - '.py': (path) => new TextLoader(path), // Python - '.rst': (path) => new TextLoader(path), // reStructuredText - '.ruby': (path) => new TextLoader(path), // Ruby - '.rb': (path) => new TextLoader(path), // Ruby - '.rs': (path) => new TextLoader(path), // Rust - '.scala': (path) => new TextLoader(path), // Scala - '.sc': (path) => new TextLoader(path), // Scala - '.scss': (path) => new TextLoader(path), // Sass - '.sol': (path) => new TextLoader(path), // Solidity - '.sql': (path) => new TextLoader(path), //SQL - '.swift': (path) => new TextLoader(path), 
// Swift - '.markdown': (path) => new TextLoader(path), // Markdown - '.md': (path) => new TextLoader(path), // Markdown - '.tex': (path) => new TextLoader(path), // LaTeX - '.ltx': (path) => new TextLoader(path), // LaTeX - '.html': (path) => new TextLoader(path), // HTML - '.vb': (path) => new TextLoader(path), // Visual Basic - '.xml': (path) => new TextLoader(path) // XML - }) + console.log('Recursive: ', recursive) + const loader = new DirectoryLoader( + folderPath, + { + '.json': (path) => new JSONLoader(path), + '.txt': (path) => new TextLoader(path), + '.csv': (path) => new CSVLoader(path), + '.docx': (path) => new DocxLoader(path), + // @ts-ignore + '.pdf': (path) => new PDFLoader(path, { pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') }), + '.aspx': (path) => new TextLoader(path), + '.asp': (path) => new TextLoader(path), + '.cpp': (path) => new TextLoader(path), // C++ + '.c': (path) => new TextLoader(path), + '.cs': (path) => new TextLoader(path), + '.css': (path) => new TextLoader(path), + '.go': (path) => new TextLoader(path), // Go + '.h': (path) => new TextLoader(path), // C++ Header files + '.kt': (path) => new TextLoader(path), // Kotlin + '.java': (path) => new TextLoader(path), // Java + '.js': (path) => new TextLoader(path), // JavaScript + '.less': (path) => new TextLoader(path), // Less files + '.ts': (path) => new TextLoader(path), // TypeScript + '.php': (path) => new TextLoader(path), // PHP + '.proto': (path) => new TextLoader(path), // Protocol Buffers + '.python': (path) => new TextLoader(path), // Python + '.py': (path) => new TextLoader(path), // Python + '.rst': (path) => new TextLoader(path), // reStructuredText + '.ruby': (path) => new TextLoader(path), // Ruby + '.rb': (path) => new TextLoader(path), // Ruby + '.rs': (path) => new TextLoader(path), // Rust + '.scala': (path) => new TextLoader(path), // Scala + '.sc': (path) => new TextLoader(path), // Scala + '.scss': (path) => new TextLoader(path), // Sass + '.sol': (path) => new TextLoader(path), // Solidity + '.sql': (path) => new TextLoader(path), //SQL + '.swift': (path) => new TextLoader(path), // Swift + '.markdown': (path) => new TextLoader(path), // Markdown + '.md': (path) => new TextLoader(path), // Markdown + '.tex': (path) => new TextLoader(path), // LaTeX + '.ltx': (path) => new TextLoader(path), // LaTeX + '.html': (path) => new TextLoader(path), // HTML + '.vb': (path) => new TextLoader(path), // Visual Basic + '.xml': (path) => new TextLoader(path) // XML + }, + recursive + ) let docs = [] if (textSplitter) { From 19fb13baf05642a09e10a57ed302bca8e339e2dd Mon Sep 17 00:00:00 2001 From: Ilyes Tascou Date: Tue, 6 Feb 2024 14:36:32 +0100 Subject: [PATCH 24/24] fix for linting --- packages/components/nodes/documentloaders/Folder/Folder.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/components/nodes/documentloaders/Folder/Folder.ts b/packages/components/nodes/documentloaders/Folder/Folder.ts index ab7705629..fb3db8e8e 100644 --- a/packages/components/nodes/documentloaders/Folder/Folder.ts +++ b/packages/components/nodes/documentloaders/Folder/Folder.ts @@ -62,7 +62,6 @@ class Folder_DocumentLoaders implements INode { const metadata = nodeData.inputs?.metadata const recursive = nodeData.inputs?.recursive as boolean - console.log('Recursive: ', recursive) const loader = new DirectoryLoader( folderPath, {
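
Note on the recursive option added in PATCH 23/24: the node simply forwards the new boolean input as the third constructor argument of LangChain's DirectoryLoader. Below is a minimal sketch of that behaviour outside Flowise, with a hypothetical folder path and a trimmed-down loader mapping (only two extensions instead of the node's full list):

```typescript
import { DirectoryLoader } from 'langchain/document_loaders/fs/directory'
import { TextLoader } from 'langchain/document_loaders/fs/text'

// Stands in for nodeData.inputs?.recursive in Folder.ts
const recursive = false

const loader = new DirectoryLoader(
    '/tmp/my-project', // hypothetical folder path
    {
        '.kt': (path) => new TextLoader(path), // Kotlin, registered in PATCH 19
        '.ts': (path) => new TextLoader(path) // TypeScript
    },
    recursive // third argument: when false, files in subdirectories are skipped
)

const docs = await loader.load()
console.log(`Loaded ${docs.length} documents`)
```

Passing the flag straight through is a small but deliberate design choice: DirectoryLoader treats the recursion flag with a default when it is undefined, so flows saved before this patch should keep loading the same way until the user explicitly sets the new toggle.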
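
For reference, the Sub Question Query Engine chatflow added earlier in this series maps fairly directly onto plain LlamaIndexTS. The sketch below shows the same pattern under simplifying assumptions: two in-memory VectorStoreIndex instances stand in for the two metadata-filtered Pinecone retrievers, the document texts are placeholders, and `engine.query()` is shown taking a plain string as in the llamaindex versions current around this series; the exact signature may differ in newer releases.

```typescript
import { Document, VectorStoreIndex, SubQuestionQueryEngine } from 'llamaindex'

// Two in-memory indexes stand in for the Pinecone namespaces filtered by
// { source: "apple" } and { source: "tesla" } in the chatflow.
const appleIndex = await VectorStoreIndex.fromDocuments([
    new Document({ text: 'Apple Inc (AAPL) FORM 10K 2022 ... (placeholder text)' })
])
const teslaIndex = await VectorStoreIndex.fromDocuments([
    new Document({ text: 'Tesla Inc (TSLA) FORM 10K 2022 ... (placeholder text)' })
])

const engine = SubQuestionQueryEngine.fromDefaults({
    queryEngineTools: [
        {
            queryEngine: appleIndex.asQueryEngine(),
            metadata: { name: 'apple_tool', description: 'Financials of Apple Inc (AAPL) for 2022' }
        },
        {
            queryEngine: teslaIndex.asQueryEngine(),
            metadata: { name: 'tesla_tool', description: 'Financials of Tesla Inc (TSLA) for 2022' }
        }
    ]
})

// The engine first asks the LLM to decompose the query into sub questions,
// answers each one with the matching tool, then synthesizes a final answer.
const response = await engine.query('Compare the 2022 revenue of Apple and Tesla')
console.log(response.toString())
```

This is also why the sticky note in the flow suggests wrapping it with an OpenAI Agent when memory is needed: the sub-question engine itself is stateless, so each query is decomposed and answered from scratch.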