diff --git a/.github/workflows/autoSyncMergedPullRequest.yml b/.github/workflows/autoSyncMergedPullRequest.yml index a0191ba9b..a1f1fc33b 100644 --- a/.github/workflows/autoSyncMergedPullRequest.yml +++ b/.github/workflows/autoSyncMergedPullRequest.yml @@ -1,33 +1,33 @@ name: autoSyncMergedPullRequest on: - pull_request_target: - types: - - closed - branches: [ "main" ] + pull_request_target: + types: + - closed + branches: ['main'] jobs: - autoSyncMergedPullRequest: - if: github.event.pull_request.merged == true - runs-on: ubuntu-latest - permissions: - contents: write - steps: - - uses: actions/checkout@v3 - - name: Show PR info - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - echo The PR #${{ github.event.pull_request.number }} was merged on main branch! - - name: Repository Dispatch - uses: peter-evans/repository-dispatch@v2 - with: - token: ${{ secrets.AUTOSYNC_TOKEN }} - repository: ${{ secrets.AUTOSYNC_CH_URL }} - event-type: ${{ secrets.AUTOSYNC_PR_EVENT_TYPE }} - client-payload: >- - { - "ref": "${{ github.ref }}", - "prNumber": "${{ github.event.pull_request.number }}", - "prTitle": "${{ github.event.pull_request.title }}", - "prDescription": "${{ github.event.pull_request.description }}", - "sha": "${{ github.sha }}" - } + autoSyncMergedPullRequest: + if: github.event.pull_request.merged == true + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - uses: actions/checkout@v3 + - name: Show PR info + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + echo The PR #${{ github.event.pull_request.number }} was merged on main branch! + - name: Repository Dispatch + uses: peter-evans/repository-dispatch@v2 + with: + token: ${{ secrets.AUTOSYNC_TOKEN }} + repository: ${{ secrets.AUTOSYNC_CH_URL }} + event-type: ${{ secrets.AUTOSYNC_PR_EVENT_TYPE }} + client-payload: >- + { + "ref": "${{ github.ref }}", + "prNumber": "${{ github.event.pull_request.number }}", + "prTitle": "${{ github.event.pull_request.title }}", + "prDescription": "${{ github.event.pull_request.description }}", + "sha": "${{ github.sha }}" + } diff --git a/.github/workflows/autoSyncSingleCommit.yml b/.github/workflows/autoSyncSingleCommit.yml index 8700f2321..0a8c78148 100644 --- a/.github/workflows/autoSyncSingleCommit.yml +++ b/.github/workflows/autoSyncSingleCommit.yml @@ -1,36 +1,36 @@ name: autoSyncSingleCommit on: - push: - branches: - - main + push: + branches: + - main jobs: - doNotAutoSyncSingleCommit: - if: github.event.commits[1] != null - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: IGNORE autoSyncSingleCommit - run: | - echo This single commit has came from a merged commit. We will ignore it. This case is handled in autoSyncMergedPullRequest workflow for merge commits comming from merged pull requests only! Beware, the regular merge commits are not handled by any workflow for the moment. - autoSyncSingleCommit: - if: github.event.commits[1] == null - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: autoSyncSingleCommit - env: - GITHUB_CONTEXT: ${{ toJSON(github) }} - run: | - echo Autosync a single commit with id: ${{ github.sha }} from openSource main branch towards cloud hosted version. 
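For reference, both autoSync workflows in this PR delegate the actual cross-repo sync to peter-evans/repository-dispatch@v2, which is a thin wrapper around GitHub's "create a repository dispatch event" REST endpoint. A minimal TypeScript sketch of that same call follows; the repository slug, event type and token here are placeholders, since the workflows read the real values from the AUTOSYNC_CH_URL, AUTOSYNC_PR_EVENT_TYPE / AUTOSYNC_SC_EVENT_TYPE and AUTOSYNC_TOKEN secrets.

// Hypothetical stand-ins for the values the workflow pulls from secrets.
const targetRepo = 'org/cloud-hosted-flowise'

async function sendRepositoryDispatch(eventType: string, clientPayload: Record<string, string>): Promise<void> {
    const res = await fetch(`https://api.github.com/repos/${targetRepo}/dispatches`, {
        method: 'POST',
        headers: {
            Accept: 'application/vnd.github+json',
            Authorization: `Bearer ${process.env.AUTOSYNC_TOKEN}`
        },
        body: JSON.stringify({ event_type: eventType, client_payload: clientPayload })
    })
    // GitHub answers 204 No Content when the dispatch event is accepted
    if (res.status !== 204) throw new Error(`repository_dispatch failed with HTTP ${res.status}`)
}

The receiving repository then runs its own workflow on the matching repository_dispatch event type and reads the PR or commit details back out of client_payload.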
- - name: Repository Dispatch - uses: peter-evans/repository-dispatch@v2 - with: - token: ${{ secrets.AUTOSYNC_TOKEN }} - repository: ${{ secrets.AUTOSYNC_CH_URL }} - event-type: ${{ secrets.AUTOSYNC_SC_EVENT_TYPE }} - client-payload: >- - { - "ref": "${{ github.ref }}", - "sha": "${{ github.sha }}", - "commitMessage": "${{ github.event.commits[0].message }}" - } + doNotAutoSyncSingleCommit: + if: github.event.commits[1] != null + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: IGNORE autoSyncSingleCommit + run: | + echo This single commit has came from a merged commit. We will ignore it. This case is handled in autoSyncMergedPullRequest workflow for merge commits comming from merged pull requests only! Beware, the regular merge commits are not handled by any workflow for the moment. + autoSyncSingleCommit: + if: github.event.commits[1] == null + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: autoSyncSingleCommit + env: + GITHUB_CONTEXT: ${{ toJSON(github) }} + run: | + echo Autosync a single commit with id: ${{ github.sha }} from openSource main branch towards cloud hosted version. + - name: Repository Dispatch + uses: peter-evans/repository-dispatch@v2 + with: + token: ${{ secrets.AUTOSYNC_TOKEN }} + repository: ${{ secrets.AUTOSYNC_CH_URL }} + event-type: ${{ secrets.AUTOSYNC_SC_EVENT_TYPE }} + client-payload: >- + { + "ref": "${{ github.ref }}", + "sha": "${{ github.sha }}", + "commitMessage": "${{ github.event.commits[0].message }}" + } diff --git a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts new file mode 100644 index 000000000..e570b2636 --- /dev/null +++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts @@ -0,0 +1,135 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex' + +interface AzureOpenAIConfig { + apiKey?: string + endpoint?: string + apiVersion?: string + deploymentName?: string +} + +class AzureChatOpenAI_LlamaIndex_ChatModels implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + tags: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'AzureChatOpenAI' + this.name = 'azureChatOpenAI_LlamaIndex' + this.version = 1.0 + this.type = 'AzureChatOpenAI' + this.icon = 'Azure.svg' + this.category = 'Chat Models' + this.description = 'Wrapper around Azure OpenAI Chat LLM specific for LlamaIndex' + this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['azureOpenAIApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'gpt-4', + name: 'gpt-4' + }, + { + label: 'gpt-4-32k', + name: 'gpt-4-32k' + }, + { + label: 'gpt-3.5-turbo', + name: 'gpt-3.5-turbo' + }, + { + label: 'gpt-3.5-turbo-16k', + name: 'gpt-3.5-turbo-16k' + } + ], + default: 'gpt-3.5-turbo-16k', + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + 
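// For reference: the init() method at the bottom of this file turns the inputs above into a
// llamaindex OpenAI instance pointed at Azure. A minimal sketch of the object it ends up building;
// the endpoint, deployment name, API version and env var below are placeholders, while the real
// values come from the selected azureOpenAIApi credential.
import { OpenAI } from 'llamaindex'

const azureModel = new OpenAI({
    model: 'gpt-3.5-turbo-16k',
    temperature: 0.9,
    azure: {
        apiKey: process.env.AZURE_OPENAI_API_KEY, // assumed env var, for the sketch only
        endpoint: 'https://my-instance.openai.azure.com',
        apiVersion: '2023-07-01-preview', // placeholder API version
        deploymentName: 'my-deployment'
    }
})
// azureModel can then be wired into any engine node that accepts a BaseChatModel_LlamaIndex input.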
{ + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top Probability', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Timeout', + name: 'timeout', + type: 'number', + step: 1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS + const temperature = nodeData.inputs?.temperature as string + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData) + const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData) + const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) + const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) + + const obj: Partial & { azure?: AzureOpenAIConfig } = { + temperature: parseFloat(temperature), + model: modelName, + azure: { + apiKey: azureOpenAIApiKey, + endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`, + apiVersion: azureOpenAIApiVersion, + deploymentName: azureOpenAIApiDeploymentName + } + } + + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAI(obj) + return model + } +} + +module.exports = { nodeClass: AzureChatOpenAI_LlamaIndex_ChatModels } diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts new file mode 100644 index 000000000..69a15114e --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts @@ -0,0 +1,104 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { Anthropic } from 'llamaindex' + +class ChatAnthropic_LlamaIndex_ChatModels implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + tags: string[] + baseClasses: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'ChatAnthropic' + this.name = 'chatAnthropic_LlamaIndex' + this.version = 1.0 + this.type = 'ChatAnthropic' + this.icon = 'Anthropic.svg' + this.category = 'Chat Models' + this.description = 'Wrapper around ChatAnthropic LLM specific for LlamaIndex' + this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(Anthropic)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['anthropicApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'claude-2', + name: 'claude-2', + description: 'Claude 2 latest major version, automatically get updates to the model as they are released' + }, + { + label: 'claude-instant-1', + name: 
'claude-instant-1', + description: 'Claude Instant latest major version, automatically get updates to the model as they are released' + } + ], + default: 'claude-2', + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokensToSample', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top P', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const temperature = nodeData.inputs?.temperature as string + const modelName = nodeData.inputs?.modelName as 'claude-2' | 'claude-instant-1' | undefined + const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string + const topP = nodeData.inputs?.topP as string + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData) + + const obj: Partial = { + temperature: parseFloat(temperature), + model: modelName, + apiKey: anthropicApiKey + } + + if (maxTokensToSample) obj.maxTokens = parseInt(maxTokensToSample, 10) + if (topP) obj.topP = parseFloat(topP) + + const model = new Anthropic(obj) + return model + } +} + +module.exports = { nodeClass: ChatAnthropic_LlamaIndex_ChatModels } diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts new file mode 100644 index 000000000..8b3567a6c --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts @@ -0,0 +1,156 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex' + +class ChatOpenAI_LlamaIndex_LLMs implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + tags: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'ChatOpenAI' + this.name = 'chatOpenAI_LlamaIndex' + this.version = 1.0 + this.type = 'ChatOpenAI' + this.icon = 'openai.svg' + this.category = 'Chat Models' + this.description = 'Wrapper around OpenAI Chat LLM specific for LlamaIndex' + this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['openAIApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'gpt-4', + name: 'gpt-4' + }, + { + label: 'gpt-4-turbo-preview', + name: 'gpt-4-turbo-preview' + }, + { + label: 'gpt-4-0125-preview', + name: 'gpt-4-0125-preview' + }, + { + label: 'gpt-4-1106-preview', + name: 'gpt-4-1106-preview' + }, + { + label: 'gpt-4-vision-preview', + name: 'gpt-4-vision-preview' + }, + { + label: 'gpt-4-0613', + name: 'gpt-4-0613' + }, + { + label: 'gpt-4-32k', + name: 'gpt-4-32k' + }, + { + label: 'gpt-4-32k-0613', + name: 'gpt-4-32k-0613' + }, + { + label: 'gpt-3.5-turbo', + name: 'gpt-3.5-turbo' + }, + { + label: 'gpt-3.5-turbo-1106', + name: 'gpt-3.5-turbo-1106' + }, + { + label: 
'gpt-3.5-turbo-0613', + name: 'gpt-3.5-turbo-0613' + }, + { + label: 'gpt-3.5-turbo-16k', + name: 'gpt-3.5-turbo-16k' + }, + { + label: 'gpt-3.5-turbo-16k-0613', + name: 'gpt-3.5-turbo-16k-0613' + } + ], + default: 'gpt-3.5-turbo', + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top Probability', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Timeout', + name: 'timeout', + type: 'number', + step: 1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const temperature = nodeData.inputs?.temperature as string + const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) + + const obj: Partial = { + temperature: parseFloat(temperature), + model: modelName, + apiKey: openAIApiKey + } + + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAI(obj) + return model + } +} + +module.exports = { nodeClass: ChatOpenAI_LlamaIndex_LLMs } diff --git a/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts b/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts new file mode 100644 index 000000000..92f320bef --- /dev/null +++ b/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts @@ -0,0 +1,77 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAIEmbedding } from 'llamaindex' + +interface AzureOpenAIConfig { + apiKey?: string + endpoint?: string + apiVersion?: string + deploymentName?: string +} + +class AzureOpenAIEmbedding_LlamaIndex_Embeddings implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + credential: INodeParams + tags: string[] + inputs: INodeParams[] + + constructor() { + this.label = 'Azure OpenAI Embeddings' + this.name = 'azureOpenAIEmbeddingsLlamaIndex' + this.version = 1.0 + this.type = 'AzureOpenAIEmbeddings' + this.icon = 'Azure.svg' + this.category = 'Embeddings' + this.description = 'Azure OpenAI API embeddings specific for LlamaIndex' + this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['azureOpenAIApi'] + } + this.inputs = [ + { + label: 'Timeout', + name: 'timeout', + type: 'number', + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const timeout = nodeData.inputs?.timeout as string + + const 
credentialData = await getCredentialData(nodeData.credential ?? '', options) + const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData) + const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData) + const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) + const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) + + const obj: Partial & { azure?: AzureOpenAIConfig } = { + azure: { + apiKey: azureOpenAIApiKey, + endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`, + apiVersion: azureOpenAIApiVersion, + deploymentName: azureOpenAIApiDeploymentName + } + } + + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAIEmbedding(obj) + return model + } +} + +module.exports = { nodeClass: AzureOpenAIEmbedding_LlamaIndex_Embeddings } diff --git a/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts new file mode 100644 index 000000000..960197fe2 --- /dev/null +++ b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts @@ -0,0 +1,91 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAIEmbedding } from 'llamaindex' + +class OpenAIEmbedding_LlamaIndex_Embeddings implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + tags: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'OpenAI Embedding' + this.name = 'openAIEmbedding_LlamaIndex' + this.version = 1.0 + this.type = 'OpenAIEmbedding' + this.icon = 'openai.svg' + this.category = 'Embeddings' + this.description = 'OpenAI Embedding specific for LlamaIndex' + this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['openAIApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'text-embedding-3-large', + name: 'text-embedding-3-large' + }, + { + label: 'text-embedding-3-small', + name: 'text-embedding-3-small' + }, + { + label: 'text-embedding-ada-002', + name: 'text-embedding-ada-002' + } + ], + default: 'text-embedding-ada-002', + optional: true + }, + { + label: 'Timeout', + name: 'timeout', + type: 'number', + optional: true, + additionalParams: true + }, + { + label: 'BasePath', + name: 'basepath', + type: 'string', + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const timeout = nodeData.inputs?.timeout as string + const modelName = nodeData.inputs?.modelName as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) + + const obj: Partial = { + apiKey: openAIApiKey, + model: modelName + } + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAIEmbedding(obj) + return model + } +} + +module.exports = { nodeClass: OpenAIEmbedding_LlamaIndex_Embeddings } diff --git a/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts b/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts new file mode 100644 index 000000000..262ceb7c2 --- /dev/null +++ b/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts @@ -0,0 +1,149 @@ +import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { BaseNode, Metadata, BaseRetriever, LLM, ContextChatEngine, ChatMessage } from 'llamaindex' +import { reformatSourceDocuments } from '../EngineUtils' + +class ContextChatEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + sessionId?: string + + constructor(fields?: { sessionId?: string }) { + this.label = 'Context Chat Engine' + this.name = 'contextChatEngine' + this.version = 1.0 + this.type = 'ContextChatEngine' + this.icon = 'context-chat-engine.png' + this.category = 'Engine' + this.description = 'Answer question based on retrieved documents (context) with built-in memory to remember conversation' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Vector Store Retriever', + name: 'vectorStoreRetriever', + type: 'VectorIndexRetriever' + }, + { + label: 'Memory', + name: 'memory', + type: 'BaseChatMemory' + }, + { + label: 'Return Source Documents', + name: 'returnSourceDocuments', + type: 'boolean', + optional: true + }, + { + label: 'System Message', + name: 'systemMessagePrompt', + type: 'string', + rows: 4, + optional: true, + placeholder: + 'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.' 
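// The run() method that follows wires these inputs into llamaindex's ContextChatEngine and streams
// the answer back over socket.IO. Stripped of the memory and streaming plumbing, the core pattern
// is roughly the sketch below: model is assumed to come from one of the *_LlamaIndex chat model
// nodes above, and retriever from a VectorStoreIndex retriever such as the Pinecone node later in
// this PR. A sketch of the same calls, not the node's exact code.
import { ContextChatEngine, ChatMessage, LLM, BaseRetriever } from 'llamaindex'

async function askWithContext(model: LLM, retriever: BaseRetriever, question: string): Promise<string> {
    // Seed the history with the optional system message, using the same 'user' role the node uses
    const chatHistory: ChatMessage[] = [{ role: 'user', content: 'You are a helpful assistant' }]
    const chatEngine = new ContextChatEngine({ chatModel: model, retriever })

    // Stream the answer token by token; chunk.response is the same field the node emits to socket.IO
    const stream = await chatEngine.chat({ message: question, chatHistory, stream: true })
    let answer = ''
    for await (const chunk of stream) {
        answer += chunk.response
    }
    return answer
}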
+ } + ] + this.sessionId = fields?.sessionId + } + + async init(): Promise { + return null + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const model = nodeData.inputs?.model as LLM + const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever + const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string + const memory = nodeData.inputs?.memory as FlowiseMemory + const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean + + const chatHistory = [] as ChatMessage[] + + if (systemMessagePrompt) { + chatHistory.push({ + content: systemMessagePrompt, + role: 'user' + }) + } + + const chatEngine = new ContextChatEngine({ chatModel: model, retriever: vectorStoreRetriever }) + + const msgs = (await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[] + for (const message of msgs) { + if (message.type === 'apiMessage') { + chatHistory.push({ + content: message.message, + role: 'assistant' + }) + } else if (message.type === 'userMessage') { + chatHistory.push({ + content: message.message, + role: 'user' + }) + } + } + + let text = '' + let isStreamingStarted = false + let sourceDocuments: ICommonObject[] = [] + let sourceNodes: BaseNode[] = [] + const isStreamingEnabled = options.socketIO && options.socketIOClientId + + if (isStreamingEnabled) { + const stream = await chatEngine.chat({ message: input, chatHistory, stream: true }) + for await (const chunk of stream) { + text += chunk.response + if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes + if (!isStreamingStarted) { + isStreamingStarted = true + options.socketIO.to(options.socketIOClientId).emit('start', chunk.response) + } + + options.socketIO.to(options.socketIOClientId).emit('token', chunk.response) + } + + if (returnSourceDocuments) { + sourceDocuments = reformatSourceDocuments(sourceNodes) + options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments) + } + } else { + const response = await chatEngine.chat({ message: input, chatHistory }) + text = response?.response + sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? 
[]) + } + + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: text, + type: 'apiMessage' + } + ], + this.sessionId + ) + + if (returnSourceDocuments) return { text, sourceDocuments } + else return { text } + } +} + +module.exports = { nodeClass: ContextChatEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts b/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts new file mode 100644 index 000000000..9bc9f3c05 --- /dev/null +++ b/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts @@ -0,0 +1,124 @@ +import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { LLM, ChatMessage, SimpleChatEngine } from 'llamaindex' + +class SimpleChatEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + sessionId?: string + + constructor(fields?: { sessionId?: string }) { + this.label = 'Simple Chat Engine' + this.name = 'simpleChatEngine' + this.version = 1.0 + this.type = 'SimpleChatEngine' + this.icon = 'chat-engine.png' + this.category = 'Engine' + this.description = 'Simple engine to handle back and forth conversations' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Memory', + name: 'memory', + type: 'BaseChatMemory' + }, + { + label: 'System Message', + name: 'systemMessagePrompt', + type: 'string', + rows: 4, + optional: true, + placeholder: 'You are a helpful assistant' + } + ] + this.sessionId = fields?.sessionId + } + + async init(): Promise { + return null + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const model = nodeData.inputs?.model as LLM + const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string + const memory = nodeData.inputs?.memory as FlowiseMemory + + const chatHistory = [] as ChatMessage[] + + if (systemMessagePrompt) { + chatHistory.push({ + content: systemMessagePrompt, + role: 'user' + }) + } + + const chatEngine = new SimpleChatEngine({ llm: model }) + + const msgs = (await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[] + for (const message of msgs) { + if (message.type === 'apiMessage') { + chatHistory.push({ + content: message.message, + role: 'assistant' + }) + } else if (message.type === 'userMessage') { + chatHistory.push({ + content: message.message, + role: 'user' + }) + } + } + + let text = '' + let isStreamingStarted = false + const isStreamingEnabled = options.socketIO && options.socketIOClientId + + if (isStreamingEnabled) { + const stream = await chatEngine.chat({ message: input, chatHistory, stream: true }) + for await (const chunk of stream) { + text += chunk.response + if (!isStreamingStarted) { + isStreamingStarted = true + options.socketIO.to(options.socketIOClientId).emit('start', chunk.response) + } + + options.socketIO.to(options.socketIOClientId).emit('token', chunk.response) + } + } else { + const response = await chatEngine.chat({ message: input, chatHistory }) + text = response?.response + } + + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: text, + type: 'apiMessage' + } + ], + this.sessionId + ) + + return 
text + } +} + +module.exports = { nodeClass: SimpleChatEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/ChatEngine/chat-engine.png b/packages/components/nodes/engine/ChatEngine/chat-engine.png new file mode 100644 index 000000000..d614b8887 Binary files /dev/null and b/packages/components/nodes/engine/ChatEngine/chat-engine.png differ diff --git a/packages/components/nodes/engine/ChatEngine/context-chat-engine.png b/packages/components/nodes/engine/ChatEngine/context-chat-engine.png new file mode 100644 index 000000000..ef4adc131 Binary files /dev/null and b/packages/components/nodes/engine/ChatEngine/context-chat-engine.png differ diff --git a/packages/components/nodes/engine/EngineUtils.ts b/packages/components/nodes/engine/EngineUtils.ts new file mode 100644 index 000000000..9424e789a --- /dev/null +++ b/packages/components/nodes/engine/EngineUtils.ts @@ -0,0 +1,12 @@ +import { BaseNode, Metadata } from 'llamaindex' + +export const reformatSourceDocuments = (sourceNodes: BaseNode[]) => { + const sourceDocuments = [] + for (const node of sourceNodes) { + sourceDocuments.push({ + pageContent: (node as any).text, + metadata: node.metadata + }) + } + return sourceDocuments +} diff --git a/packages/components/nodes/engine/QueryEngine/QueryEngine.ts b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts new file mode 100644 index 000000000..bd6e040dc --- /dev/null +++ b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts @@ -0,0 +1,143 @@ +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { + RetrieverQueryEngine, + ResponseSynthesizer, + CompactAndRefine, + TreeSummarize, + Refine, + SimpleResponseBuilder, + BaseNode, + Metadata +} from 'llamaindex' +import { reformatSourceDocuments } from '../EngineUtils' + +class QueryEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + sessionId?: string + + constructor(fields?: { sessionId?: string }) { + this.label = 'Query Engine' + this.name = 'queryEngine' + this.version = 1.0 + this.type = 'QueryEngine' + this.icon = 'query-engine.png' + this.category = 'Engine' + this.description = 'Simple query engine built to answer question over your data, without memory' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Vector Store Retriever', + name: 'vectorStoreRetriever', + type: 'VectorIndexRetriever' + }, + { + label: 'Response Synthesizer', + name: 'responseSynthesizer', + type: 'ResponseSynthesizer', + description: + 'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. 
See more', + optional: true + }, + { + label: 'Return Source Documents', + name: 'returnSourceDocuments', + type: 'boolean', + optional: true + } + ] + this.sessionId = fields?.sessionId + } + + async init(): Promise { + return null + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean + const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever + const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer + + let queryEngine = new RetrieverQueryEngine(vectorStoreRetriever) + + if (responseSynthesizerObj) { + if (responseSynthesizerObj.type === 'TreeSummarize') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext, responseSynthesizerObj.textQAPromptTemplate), + serviceContext: vectorStoreRetriever.serviceContext + }) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } else if (responseSynthesizerObj.type === 'CompactAndRefine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new CompactAndRefine( + vectorStoreRetriever.serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext: vectorStoreRetriever.serviceContext + }) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } else if (responseSynthesizerObj.type === 'Refine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new Refine( + vectorStoreRetriever.serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext: vectorStoreRetriever.serviceContext + }) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new SimpleResponseBuilder(vectorStoreRetriever.serviceContext), + serviceContext: vectorStoreRetriever.serviceContext + }) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } + } + + let text = '' + let sourceDocuments: ICommonObject[] = [] + let sourceNodes: BaseNode[] = [] + let isStreamingStarted = false + const isStreamingEnabled = options.socketIO && options.socketIOClientId + + if (isStreamingEnabled) { + const stream = await queryEngine.query({ query: input, stream: true }) + for await (const chunk of stream) { + text += chunk.response + if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes + if (!isStreamingStarted) { + isStreamingStarted = true + options.socketIO.to(options.socketIOClientId).emit('start', chunk.response) + } + + options.socketIO.to(options.socketIOClientId).emit('token', chunk.response) + } + + if (returnSourceDocuments) { + sourceDocuments = reformatSourceDocuments(sourceNodes) + options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments) + } + } else { + const response = await queryEngine.query({ query: input }) + text = response?.response + sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? 
[]) + } + + if (returnSourceDocuments) return { text, sourceDocuments } + else return { text } + } +} + +module.exports = { nodeClass: QueryEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/QueryEngine/query-engine.png b/packages/components/nodes/engine/QueryEngine/query-engine.png new file mode 100644 index 000000000..68efdbe00 Binary files /dev/null and b/packages/components/nodes/engine/QueryEngine/query-engine.png differ diff --git a/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts b/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts new file mode 100644 index 000000000..a872c0a23 --- /dev/null +++ b/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts @@ -0,0 +1,193 @@ +import { flatten } from 'lodash' +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { + TreeSummarize, + SimpleResponseBuilder, + Refine, + BaseEmbedding, + ResponseSynthesizer, + CompactAndRefine, + QueryEngineTool, + LLMQuestionGenerator, + SubQuestionQueryEngine, + BaseNode, + Metadata, + serviceContextFromDefaults +} from 'llamaindex' +import { reformatSourceDocuments } from '../EngineUtils' + +class SubQuestionQueryEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + sessionId?: string + + constructor(fields?: { sessionId?: string }) { + this.label = 'Sub Question Query Engine' + this.name = 'subQuestionQueryEngine' + this.version = 1.0 + this.type = 'SubQuestionQueryEngine' + this.icon = 'subQueryEngine.svg' + this.category = 'Engine' + this.description = + 'Breaks complex query into sub questions for each relevant data source, then gather all the intermediate reponses and synthesizes a final response' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'QueryEngine Tools', + name: 'queryEngineTools', + type: 'QueryEngineTool', + list: true + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'BaseEmbedding_LlamaIndex' + }, + { + label: 'Response Synthesizer', + name: 'responseSynthesizer', + type: 'ResponseSynthesizer', + description: + 'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. 
See more', + optional: true + }, + { + label: 'Return Source Documents', + name: 'returnSourceDocuments', + type: 'boolean', + optional: true + } + ] + this.sessionId = fields?.sessionId + } + + async init(): Promise { + return null + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + + const serviceContext = serviceContextFromDefaults({ + llm: model, + embedModel: embeddings + }) + + let queryEngineTools = nodeData.inputs?.queryEngineTools as QueryEngineTool[] + queryEngineTools = flatten(queryEngineTools) + + let queryEngine = SubQuestionQueryEngine.fromDefaults({ + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + + const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer + if (responseSynthesizerObj) { + if (responseSynthesizerObj.type === 'TreeSummarize') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new TreeSummarize(serviceContext, responseSynthesizerObj.textQAPromptTemplate), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if (responseSynthesizerObj.type === 'CompactAndRefine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new CompactAndRefine( + serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if (responseSynthesizerObj.type === 'Refine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new Refine( + serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new SimpleResponseBuilder(serviceContext), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } + } + + let text = '' + let sourceDocuments: ICommonObject[] = [] + let sourceNodes: BaseNode[] = [] + let isStreamingStarted = false + const isStreamingEnabled = options.socketIO && options.socketIOClientId + + if (isStreamingEnabled) { + const stream = await queryEngine.query({ query: input, stream: true }) + for await (const chunk of stream) { + text += chunk.response + if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes + if (!isStreamingStarted) { + isStreamingStarted = true + options.socketIO.to(options.socketIOClientId).emit('start', chunk.response) + } + + options.socketIO.to(options.socketIOClientId).emit('token', chunk.response) + } + + if (returnSourceDocuments) { + sourceDocuments = reformatSourceDocuments(sourceNodes) + options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', 
sourceDocuments) + } + } else { + const response = await queryEngine.query({ query: input }) + text = response?.response + sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? []) + } + + if (returnSourceDocuments) return { text, sourceDocuments } + else return { text } + } +} + +module.exports = { nodeClass: SubQuestionQueryEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg b/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg new file mode 100644 index 000000000..b94c20b5e --- /dev/null +++ b/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts b/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts new file mode 100644 index 000000000..db998e1f0 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts @@ -0,0 +1,75 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class CompactRefine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Compact and Refine' + this.name = 'compactrefineLlamaIndex' + this.version = 1.0 + this.type = 'CompactRefine' + this.icon = 'compactrefine.svg' + this.category = 'Response Synthesizer' + this.description = + 'CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Refine Prompt', + name: 'refinePrompt', + type: 'string', + rows: 4, + default: `The original query is as follows: {query} +We have provided an existing answer: {existingAnswer} +We have the opportunity to refine the existing answer (only if needed) with some more context below. +------------ +{context} +------------ +Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer. +Refined Answer:`, + warning: `Prompt can contains no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`, + optional: true + }, + { + label: 'Text QA Prompt', + name: 'textQAPrompt', + type: 'string', + rows: 4, + default: `Context information is below. +--------------------- +{context} +--------------------- +Given the context information and not prior knowledge, answer the query. +Query: {query} +Answer:`, + warning: `Prompt can contains no variables, or up to 2 variables. 
Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const refinePrompt = nodeData.inputs?.refinePrompt as string + const textQAPrompt = nodeData.inputs?.textQAPrompt as string + + const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) => + refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query) + const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'CompactAndRefine' }) + } +} + +module.exports = { nodeClass: CompactRefine_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg b/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg new file mode 100644 index 000000000..9ea95529b --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/Refine/Refine.ts b/packages/components/nodes/responsesynthesizer/Refine/Refine.ts new file mode 100644 index 000000000..267bc2082 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/Refine/Refine.ts @@ -0,0 +1,75 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class Refine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Refine' + this.name = 'refineLlamaIndex' + this.version = 1.0 + this.type = 'Refine' + this.icon = 'refine.svg' + this.category = 'Response Synthesizer' + this.description = + 'Create and refine an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Refine Prompt', + name: 'refinePrompt', + type: 'string', + rows: 4, + default: `The original query is as follows: {query} +We have provided an existing answer: {existingAnswer} +We have the opportunity to refine the existing answer (only if needed) with some more context below. +------------ +{context} +------------ +Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer. +Refined Answer:`, + warning: `Prompt can contains no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`, + optional: true + }, + { + label: 'Text QA Prompt', + name: 'textQAPrompt', + type: 'string', + rows: 4, + default: `Context information is below. +--------------------- +{context} +--------------------- +Given the context information and not prior knowledge, answer the query. +Query: {query} +Answer:`, + warning: `Prompt can contains no variables, or up to 2 variables. 
Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const refinePrompt = nodeData.inputs?.refinePrompt as string + const textQAPrompt = nodeData.inputs?.textQAPrompt as string + + const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) => + refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query) + const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'Refine' }) + } +} + +module.exports = { nodeClass: Refine_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/Refine/refine.svg b/packages/components/nodes/responsesynthesizer/Refine/refine.svg new file mode 100644 index 000000000..1170c5848 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/Refine/refine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts new file mode 100644 index 000000000..cb8800206 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts @@ -0,0 +1,35 @@ +import { INode, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class SimpleResponseBuilder_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Simple Response Builder' + this.name = 'simpleResponseBuilderLlamaIndex' + this.version = 1.0 + this.type = 'SimpleResponseBuilder' + this.icon = 'simplerb.svg' + this.category = 'Response Synthesizer' + this.description = `Apply a query to a collection of text chunks, gathering the responses in an array, and return a combined string of all responses. 
Useful for individual queries on each text chunk.` + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [] + } + + async init(): Promise { + return new ResponseSynthesizerClass({ type: 'SimpleResponseBuilder' }) + } +} + +module.exports = { nodeClass: SimpleResponseBuilder_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg new file mode 100644 index 000000000..6f04fdc9b --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts b/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts new file mode 100644 index 000000000..448727869 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts @@ -0,0 +1,56 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class TreeSummarize_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'TreeSummarize' + this.name = 'treeSummarizeLlamaIndex' + this.version = 1.0 + this.type = 'TreeSummarize' + this.icon = 'treesummarize.svg' + this.category = 'Response Synthesizer' + this.description = + 'Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Prompt', + name: 'prompt', + type: 'string', + rows: 4, + default: `Context information from multiple sources is below. +--------------------- +{context} +--------------------- +Given the information from multiple sources and not prior knowledge, answer the query. +Query: {query} +Answer:`, + warning: `Prompt can contains no variables, or up to 2 variables. 
Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const prompt = nodeData.inputs?.prompt as string + + const textQAPromptTemplate = ({ context = '', query = '' }) => prompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, type: 'TreeSummarize' }) + } +} + +module.exports = { nodeClass: TreeSummarize_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg b/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg new file mode 100644 index 000000000..f81a3a533 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/base.ts b/packages/components/nodes/responsesynthesizer/base.ts new file mode 100644 index 000000000..68fd7f1ab --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/base.ts @@ -0,0 +1,11 @@ +export class ResponseSynthesizerClass { + type: string + textQAPromptTemplate?: any + refinePromptTemplate?: any + + constructor(params: { type: string; textQAPromptTemplate?: any; refinePromptTemplate?: any }) { + this.type = params.type + this.textQAPromptTemplate = params.textQAPromptTemplate + this.refinePromptTemplate = params.refinePromptTemplate + } +} diff --git a/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts b/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts new file mode 100644 index 000000000..163eff766 --- /dev/null +++ b/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts @@ -0,0 +1,68 @@ +import { INode, INodeData, INodeParams } from '../../../src/Interface' +import { VectorStoreIndex } from 'llamaindex' + +class QueryEngine_Tools implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + tags: string[] + baseClasses: string[] + inputs?: INodeParams[] + + constructor() { + this.label = 'QueryEngine Tool' + this.name = 'queryEngineToolLlamaIndex' + this.version = 1.0 + this.type = 'QueryEngineTool' + this.icon = 'queryEngineTool.svg' + this.category = 'Tools' + this.tags = ['LlamaIndex'] + this.description = 'Tool used to invoke query engine' + this.baseClasses = [this.type] + this.inputs = [ + { + label: 'Vector Store Index', + name: 'vectorStoreIndex', + type: 'VectorStoreIndex' + }, + { + label: 'Tool Name', + name: 'toolName', + type: 'string', + description: 'Tool name must be small capital letter with underscore. 
Ex: my_tool' + }, + { + label: 'Tool Description', + name: 'toolDesc', + type: 'string', + rows: 4 + } + ] + } + + async init(nodeData: INodeData): Promise { + const vectorStoreIndex = nodeData.inputs?.vectorStoreIndex as VectorStoreIndex + const toolName = nodeData.inputs?.toolName as string + const toolDesc = nodeData.inputs?.toolDesc as string + const queryEngineTool = { + queryEngine: vectorStoreIndex.asQueryEngine({ + preFilters: { + ...(vectorStoreIndex as any).metadatafilter + } + }), + metadata: { + name: toolName, + description: toolDesc + }, + vectorStoreIndex + } + + return queryEngineTool + } +} + +module.exports = { nodeClass: QueryEngine_Tools } diff --git a/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg b/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg new file mode 100644 index 000000000..d49d8375c --- /dev/null +++ b/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts new file mode 100644 index 000000000..c0b2e5c11 --- /dev/null +++ b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts @@ -0,0 +1,383 @@ +import { + BaseNode, + Document, + Metadata, + VectorStore, + VectorStoreQuery, + VectorStoreQueryResult, + serviceContextFromDefaults, + storageContextFromDefaults, + VectorStoreIndex, + BaseEmbedding +} from 'llamaindex' +import { FetchResponse, Index, Pinecone, ScoredPineconeRecord } from '@pinecone-database/pinecone' +import { flatten } from 'lodash' +import { Document as LCDocument } from 'langchain/document' +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { flattenObject, getCredentialData, getCredentialParam } from '../../../src/utils' + +class PineconeLlamaIndex_VectorStores implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + tags: string[] + baseClasses: string[] + inputs: INodeParams[] + credential: INodeParams + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Pinecone' + this.name = 'pineconeLlamaIndex' + this.version = 1.0 + this.type = 'Pinecone' + this.icon = 'pinecone.svg' + this.category = 'Vector Stores' + this.description = `Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database` + this.baseClasses = [this.type, 'VectorIndexRetriever'] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['pineconeApi'] + } + this.inputs = [ + { + label: 'Document', + name: 'document', + type: 'Document', + list: true, + optional: true + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'BaseEmbedding_LlamaIndex' + }, + { + label: 'Pinecone Index', + name: 'pineconeIndex', + type: 'string' + }, + { + label: 'Pinecone Namespace', + name: 'pineconeNamespace', + type: 'string', + placeholder: 'my-first-namespace', + additionalParams: true, + optional: true + }, + { + label: 'Pinecone Metadata Filter', + name: 'pineconeMetadataFilter', + type: 'json', + optional: true, + additionalParams: true + }, + { + label: 'Top K', + name: 'topK', + description: 'Number of top results to fetch. 
Default to 4', + placeholder: '4', + type: 'number', + additionalParams: true, + optional: true + } + ] + this.outputs = [ + { + label: 'Pinecone Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'Pinecone Vector Store Index', + name: 'vectorStore', + baseClasses: [this.type, 'VectorStoreIndex'] + } + ] + } + + //@ts-ignore + vectorStoreMethods = { + async upsert(nodeData: INodeData, options: ICommonObject): Promise { + const indexName = nodeData.inputs?.pineconeIndex as string + const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string + const docs = nodeData.inputs?.document as LCDocument[] + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData) + + const pcvs = new PineconeVectorStore({ + indexName, + apiKey: pineconeApiKey, + namespace: pineconeNamespace + }) + + const flattenDocs = docs && docs.length ? flatten(docs) : [] + const finalDocs = [] + for (let i = 0; i < flattenDocs.length; i += 1) { + if (flattenDocs[i] && flattenDocs[i].pageContent) { + finalDocs.push(new LCDocument(flattenDocs[i])) + } + } + + const llamadocs: Document[] = [] + for (const doc of finalDocs) { + llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata })) + } + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ vectorStore: pcvs }) + + try { + await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext }) + } catch (e) { + throw new Error(e) + } + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const indexName = nodeData.inputs?.pineconeIndex as string + const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string + const pineconeMetadataFilter = nodeData.inputs?.pineconeMetadataFilter + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + const topK = nodeData.inputs?.topK as string + const k = topK ? parseFloat(topK) : 4 + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData) + + const obj: PineconeParams = { + indexName, + apiKey: pineconeApiKey + } + + if (pineconeNamespace) obj.namespace = pineconeNamespace + + let metadatafilter = {} + if (pineconeMetadataFilter) { + metadatafilter = typeof pineconeMetadataFilter === 'object' ? 
pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter) + obj.queryFilter = metadatafilter + } + + const pcvs = new PineconeVectorStore(obj) + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ vectorStore: pcvs }) + + const index = await VectorStoreIndex.init({ + nodes: [], + storageContext, + serviceContext + }) + + const output = nodeData.outputs?.output as string + + if (output === 'retriever') { + const retriever = index.asRetriever() + retriever.similarityTopK = k + ;(retriever as any).serviceContext = serviceContext + return retriever + } else if (output === 'vectorStore') { + ;(index as any).k = k + if (metadatafilter) { + ;(index as any).metadatafilter = metadatafilter + } + return index + } + return index + } +} + +type PineconeParams = { + indexName: string + apiKey: string + namespace?: string + chunkSize?: number + queryFilter?: object +} + +class PineconeVectorStore implements VectorStore { + storesText: boolean = true + db?: Pinecone + indexName: string + apiKey: string + chunkSize: number + namespace?: string + queryFilter?: object + + constructor(params: PineconeParams) { + this.indexName = params?.indexName + this.apiKey = params?.apiKey + this.namespace = params?.namespace ?? '' + this.chunkSize = params?.chunkSize ?? Number.parseInt(process.env.PINECONE_CHUNK_SIZE ?? '100') + this.queryFilter = params?.queryFilter ?? {} + } + + private async getDb(): Promise { + if (!this.db) { + this.db = new Pinecone({ + apiKey: this.apiKey + }) + } + return Promise.resolve(this.db) + } + + client() { + return this.getDb() + } + + async index() { + const db: Pinecone = await this.getDb() + return db.Index(this.indexName) + } + + async clearIndex() { + const db: Pinecone = await this.getDb() + return await db.index(this.indexName).deleteAll() + } + + async add(embeddingResults: BaseNode[]): Promise { + if (embeddingResults.length == 0) { + return Promise.resolve([]) + } + + const idx: Index = await this.index() + const nodes = embeddingResults.map(this.nodeToRecord) + + for (let i = 0; i < nodes.length; i += this.chunkSize) { + const chunk = nodes.slice(i, i + this.chunkSize) + const result = await this.saveChunk(idx, chunk) + if (!result) { + return Promise.reject() + } + } + return Promise.resolve([]) + } + + protected async saveChunk(idx: Index, chunk: any) { + try { + const namespace = idx.namespace(this.namespace ?? '') + await namespace.upsert(chunk) + return true + } catch (err) { + return false + } + } + + async delete(refDocId: string): Promise { + const idx = await this.index() + const namespace = idx.namespace(this.namespace ?? '') + return namespace.deleteOne(refDocId) + } + + async query(query: VectorStoreQuery): Promise { + const queryOptions: any = { + vector: query.queryEmbedding, + topK: query.similarityTopK, + filter: this.queryFilter + } + + const idx = await this.index() + const namespace = idx.namespace(this.namespace ?? 
'') + const results = await namespace.query(queryOptions) + + const idList = results.matches.map((row) => row.id) + const records: FetchResponse = await namespace.fetch(idList) + const rows = Object.values(records.records) + + const nodes = rows.map((row) => { + return new Document({ + id_: row.id, + text: this.textFromResultRow(row), + metadata: this.metaWithoutText(row.metadata), + embedding: row.values + }) + }) + + const result = { + nodes: nodes, + similarities: results.matches.map((row) => row.score || 999), + ids: results.matches.map((row) => row.id) + } + + return Promise.resolve(result) + } + + /** + * Required by VectorStore interface. Currently ignored. + */ + persist(): Promise { + return Promise.resolve() + } + + textFromResultRow(row: ScoredPineconeRecord): string { + return row.metadata?.text ?? '' + } + + metaWithoutText(meta: Metadata): any { + return Object.keys(meta) + .filter((key) => key != 'text') + .reduce((acc: any, key: string) => { + acc[key] = meta[key] + return acc + }, {}) + } + + nodeToRecord(node: BaseNode) { + let id: any = node.id_.length ? node.id_ : null + return { + id: id, + values: node.getEmbedding(), + metadata: { + ...cleanupMetadata(node.metadata), + text: (node as any).text + } + } + } +} + +const cleanupMetadata = (nodeMetadata: ICommonObject) => { + // Pinecone doesn't support nested objects, so we flatten them + const documentMetadata: any = { ...nodeMetadata } + // preserve string arrays which are allowed + const stringArrays: Record = {} + for (const key of Object.keys(documentMetadata)) { + if (Array.isArray(documentMetadata[key]) && documentMetadata[key].every((el: any) => typeof el === 'string')) { + stringArrays[key] = documentMetadata[key] + delete documentMetadata[key] + } + } + const metadata: { + [key: string]: string | number | boolean | string[] | null + } = { + ...flattenObject(documentMetadata), + ...stringArrays + } + // Pinecone doesn't support null values, so we remove them + for (const key of Object.keys(metadata)) { + if (metadata[key] == null) { + delete metadata[key] + } else if (typeof metadata[key] === 'object' && Object.keys(metadata[key] as unknown as object).length === 0) { + delete metadata[key] + } + } + return metadata +} + +module.exports = { nodeClass: PineconeLlamaIndex_VectorStores } diff --git a/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts new file mode 100644 index 000000000..36c383e98 --- /dev/null +++ b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts @@ -0,0 +1,145 @@ +import path from 'path' +import { flatten } from 'lodash' +import { storageContextFromDefaults, serviceContextFromDefaults, VectorStoreIndex, Document } from 'llamaindex' +import { Document as LCDocument } from 'langchain/document' +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { getUserHome } from '../../../src' + +class SimpleStoreUpsert_LlamaIndex_VectorStores implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'SimpleStore' + this.name = 'simpleStoreLlamaIndex' + this.version = 1.0 + this.type = 'SimpleVectorStore' + this.icon = 'simplevs.svg' + this.category = 'Vector Stores' + this.description = 'Upsert embedded data to local path and perform similarity search' + 
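// --- Illustrative sketch, not part of the diff ---
// cleanupMetadata() above prepares node metadata for Pinecone: nested objects are flattened with
// dot-joined keys via flattenObject(), string arrays are kept as-is, and null or empty-object values
// are dropped. A hypothetical before/after:
//
//   // input:  { source: 'docs/a.txt', loc: { lines: { from: 1, to: 10 } }, tags: ['faq', 'v1'], empty: {} }
//   // output: { source: 'docs/a.txt', 'loc.lines.from': 1, 'loc.lines.to': 10, tags: ['faq', 'v1'] }
// --- end of illustrative sketch ---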
this.baseClasses = [this.type, 'VectorIndexRetriever'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Document', + name: 'document', + type: 'Document', + list: true, + optional: true + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'BaseEmbedding_LlamaIndex' + }, + { + label: 'Base Path to store', + name: 'basePath', + description: + 'Path to store persist embeddings indexes with persistence. If not specified, default to same path where database is stored', + type: 'string', + optional: true + }, + { + label: 'Top K', + name: 'topK', + description: 'Number of top results to fetch. Default to 4', + placeholder: '4', + type: 'number', + optional: true + } + ] + this.outputs = [ + { + label: 'SimpleStore Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'SimpleStore Vector Store Index', + name: 'vectorStore', + baseClasses: [this.type, 'VectorStoreIndex'] + } + ] + } + + //@ts-ignore + vectorStoreMethods = { + async upsert(nodeData: INodeData): Promise { + const basePath = nodeData.inputs?.basePath as string + const docs = nodeData.inputs?.document as LCDocument[] + const embeddings = nodeData.inputs?.embeddings + const model = nodeData.inputs?.model + + let filePath = '' + if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex') + else filePath = basePath + + const flattenDocs = docs && docs.length ? flatten(docs) : [] + const finalDocs = [] + for (let i = 0; i < flattenDocs.length; i += 1) { + finalDocs.push(new LCDocument(flattenDocs[i])) + } + + const llamadocs: Document[] = [] + for (const doc of finalDocs) { + llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata })) + } + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ persistDir: filePath }) + + try { + await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext }) + } catch (e) { + throw new Error(e) + } + } + } + + async init(nodeData: INodeData): Promise { + const basePath = nodeData.inputs?.basePath as string + const embeddings = nodeData.inputs?.embeddings + const model = nodeData.inputs?.model + const topK = nodeData.inputs?.topK as string + const k = topK ? 
parseFloat(topK) : 4 + + let filePath = '' + if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex') + else filePath = basePath + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ persistDir: filePath }) + + const index = await VectorStoreIndex.init({ storageContext, serviceContext }) + + const output = nodeData.outputs?.output as string + + if (output === 'retriever') { + const retriever = index.asRetriever() + retriever.similarityTopK = k + ;(retriever as any).serviceContext = serviceContext + return retriever + } else if (output === 'vectorStore') { + ;(index as any).k = k + return index + } + return index + } +} + +module.exports = { nodeClass: SimpleStoreUpsert_LlamaIndex_VectorStores } diff --git a/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg b/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg new file mode 100644 index 000000000..52c74432b --- /dev/null +++ b/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/packages/components/package.json b/packages/components/package.json index 7a7d2e758..bcb746b04 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -61,6 +61,7 @@ "langfuse-langchain": "2.3.3", "langsmith": "0.0.53", "linkifyjs": "^4.1.1", + "llamaindex": "^0.0.48", "llmonitor": "^0.5.5", "mammoth": "^1.5.1", "moment": "^2.29.3", diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts index fe08f0706..40d84d06b 100644 --- a/packages/components/src/Interface.ts +++ b/packages/components/src/Interface.ts @@ -98,6 +98,7 @@ export interface INodeProperties { version: number category: string // TODO: use enum instead of string baseClasses: string[] + tags?: string[] description?: string filePath?: string badge?: string diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts index 7e9a68eb4..548632ea2 100644 --- a/packages/components/src/utils.ts +++ b/packages/components/src/utils.ts @@ -662,6 +662,28 @@ export const convertSchemaToZod = (schema: string | object): ICommonObject => { } } +/** + * Flatten nested object + * @param {ICommonObject} obj + * @param {string} parentKey + * @returns {ICommonObject} + */ +export const flattenObject = (obj: ICommonObject, parentKey?: string) => { + let result: any = {} + + Object.keys(obj).forEach((key) => { + const value = obj[key] + const _key = parentKey ? parentKey + '.' 
+ key : key + if (typeof value === 'object') { + result = { ...result, ...flattenObject(value, _key) } + } else { + result[_key] = value + } + }) + + return result +} + /** * Convert BaseMessage to IMessage * @param {BaseMessage[]} messages diff --git a/packages/server/marketplaces/chatflows/Context Chat Engine.json b/packages/server/marketplaces/chatflows/Context Chat Engine.json new file mode 100644 index 000000000..475c6b3a5 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Context Chat Engine.json @@ -0,0 +1,917 @@ +{ + "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation using LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 438, + "id": "textFile_0", + "position": { + "x": 221.215421786192, + "y": 94.91489477412404 + }, + "type": "customNode", + "data": { + "id": "textFile_0", + "label": "Text File", + "version": 3, + "name": "textFile", + "type": "Document", + "baseClasses": ["Document"], + "category": "Document Loaders", + "description": "Load data from text files", + "inputParams": [ + { + "label": "Txt File", + "name": "txtFile", + "type": "file", + "fileType": ".txt, .html, .aspx, .asp, .cpp, .c, .cs, .css, .go, .h, .java, .js, .less, .ts, .php, .proto, .python, .py, .rst, .ruby, .rb, .rs, .scala, .sc, .scss, .sol, .sql, .swift, .markdown, .md, .tex, .ltx, .vb, .xml", + "id": "textFile_0-input-txtFile-file" + }, + { + "label": "Metadata", + "name": "metadata", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "textFile_0-input-metadata-json" + } + ], + "inputAnchors": [ + { + "label": "Text Splitter", + "name": "textSplitter", + "type": "TextSplitter", + "optional": true, + "id": "textFile_0-input-textSplitter-TextSplitter" + } + ], + "inputs": { + "textSplitter": "{{recursiveCharacterTextSplitter_0.data.instance}}", + "metadata": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "textFile_0-output-document-Document", + "name": "document", + "label": "Document", + "type": "Document" + }, + { + "id": "textFile_0-output-text-string|json", + "name": "text", + "label": "Text", + "type": "string | json" + } + ], + "default": "document" + } + ], + "outputs": { + "output": "document" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 221.215421786192, + "y": 94.91489477412404 + }, + "dragging": false + }, + { + "width": 300, + "height": 429, + "id": "recursiveCharacterTextSplitter_0", + "position": { + "x": -203.4868320229876, + "y": 101.32475976329766 + }, + "type": "customNode", + "data": { + "id": "recursiveCharacterTextSplitter_0", + "label": "Recursive Character Text Splitter", + "version": 2, + "name": "recursiveCharacterTextSplitter", + "type": "RecursiveCharacterTextSplitter", + "baseClasses": ["RecursiveCharacterTextSplitter", "TextSplitter", "BaseDocumentTransformer", "Runnable"], + "category": "Text Splitters", + "description": "Split documents recursively by different characters - starting with \"\\n\\n\", then \"\\n\", then \" \"", + "inputParams": [ + { + "label": "Chunk Size", + "name": "chunkSize", + "type": "number", + "default": 1000, + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-chunkSize-number" + }, + { + "label": "Chunk Overlap", + "name": "chunkOverlap", + "type": "number", + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-chunkOverlap-number" + }, + { + "label": "Custom Separators", + "name": 
"separators", + "type": "string", + "rows": 4, + "description": "Array of custom separators to determine when to split the text, will override the default separators", + "placeholder": "[\"|\", \"##\", \">\", \"-\"]", + "additionalParams": true, + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-separators-string" + } + ], + "inputAnchors": [], + "inputs": { + "chunkSize": 1000, + "chunkOverlap": "", + "separators": "" + }, + "outputAnchors": [ + { + "id": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable", + "name": "recursiveCharacterTextSplitter", + "label": "RecursiveCharacterTextSplitter", + "type": "RecursiveCharacterTextSplitter | TextSplitter | BaseDocumentTransformer | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -203.4868320229876, + "y": 101.32475976329766 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_0", + "position": { + "x": 176.27434578083106, + "y": 953.3664298122493 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_0", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbedding_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 176.27434578083106, + "y": 953.3664298122493 + }, + "dragging": false + }, + { + "width": 300, + "height": 585, + "id": "pineconeLlamaIndex_0", + "position": { + "x": 609.3087433345761, + "y": 488.2141798951578 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_0", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": 
"Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": ["{{textFile_0.data.instance}}"], + "model": "{{chatOpenAI_LlamaIndex_1.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "", + "pineconeNamespace": "", + "pineconeMetadataFilter": "", + "topK": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "retriever" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 609.3087433345761, + "y": 488.2141798951578 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_1", + "position": { + "x": -195.15244974578656, + "y": 584.9467028201428 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_1", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + 
}, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -195.15244974578656, + "y": 584.9467028201428 + }, + "dragging": false + }, + { + "width": 300, + "height": 513, + "id": "contextChatEngine_0", + "position": { + "x": 1550.2553933740128, + "y": 270.7914631777829 + }, + "type": "customNode", + "data": { + "id": "contextChatEngine_0", + "label": "Context Chat Engine", + "version": 1, + "name": "contextChatEngine", + "type": "ContextChatEngine", + "baseClasses": ["ContextChatEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation", + "inputParams": [ + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "contextChatEngine_0-input-returnSourceDocuments-boolean" + }, + { + "label": "System Message", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. 
Never break character.", + "id": "contextChatEngine_0-input-systemMessagePrompt-string" + } + ], + "inputAnchors": [ + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Vector Store Retriever", + "name": "vectorStoreRetriever", + "type": "VectorIndexRetriever", + "id": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "contextChatEngine_0-input-memory-BaseChatMemory" + } + ], + "inputs": { + "model": "{{chatOpenAI_LlamaIndex_2.data.instance}}", + "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}", + "memory": "{{RedisBackedChatMemory_0.data.instance}}", + "systemMessagePrompt": "", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "contextChatEngine_0-output-contextChatEngine-ContextChatEngine", + "name": "contextChatEngine", + "label": "ContextChatEngine", + "type": "ContextChatEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1550.2553933740128, + "y": 270.7914631777829 + }, + "dragging": false + }, + { + "width": 300, + "height": 329, + "id": "RedisBackedChatMemory_0", + "position": { + "x": 1081.252815805786, + "y": 990.1701092562037 + }, + "type": "customNode", + "data": { + "id": "RedisBackedChatMemory_0", + "label": "Redis-Backed Chat Memory", + "version": 2, + "name": "RedisBackedChatMemory", + "type": "RedisBackedChatMemory", + "baseClasses": ["RedisBackedChatMemory", "BaseChatMemory", "BaseMemory"], + "category": "Memory", + "description": "Summarizes the conversation and stores the memory in Redis server", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "optional": true, + "credentialNames": ["redisCacheApi", "redisCacheUrlApi"], + "id": "RedisBackedChatMemory_0-input-credential-credential" + }, + { + "label": "Session Id", + "name": "sessionId", + "type": "string", + "description": "If not specified, a random id will be used. 
Learn more", + "default": "", + "additionalParams": true, + "optional": true, + "id": "RedisBackedChatMemory_0-input-sessionId-string" + }, + { + "label": "Session Timeouts", + "name": "sessionTTL", + "type": "number", + "description": "Omit this parameter to make sessions never expire", + "additionalParams": true, + "optional": true, + "id": "RedisBackedChatMemory_0-input-sessionTTL-number" + }, + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "additionalParams": true, + "id": "RedisBackedChatMemory_0-input-memoryKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "sessionId": "", + "sessionTTL": "", + "memoryKey": "chat_history" + }, + "outputAnchors": [ + { + "id": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory", + "name": "RedisBackedChatMemory", + "label": "RedisBackedChatMemory", + "type": "RedisBackedChatMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 1081.252815805786, + "y": 990.1701092562037 + } + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_2", + "position": { + "x": 1015.1605888108386, + "y": -38.31143117572401 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_2", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_2-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_2-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_2-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_2-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_2-input-topP-number" + }, + { + "label": 
"Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_2-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1015.1605888108386, + "y": -38.31143117572401 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "recursiveCharacterTextSplitter_0", + "sourceHandle": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable", + "target": "textFile_0", + "targetHandle": "textFile_0-input-textSplitter-TextSplitter", + "type": "buttonedge", + "id": "recursiveCharacterTextSplitter_0-recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable-textFile_0-textFile_0-input-textSplitter-TextSplitter", + "data": { + "label": "" + } + }, + { + "source": "textFile_0", + "sourceHandle": "textFile_0-output-document-Document", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-document-Document", + "type": "buttonedge", + "id": "textFile_0-textFile_0-output-document-Document-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-document-Document", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_LlamaIndex_1", + "sourceHandle": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_1-chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "data": { + "label": "" + } + }, + { + "source": "pineconeLlamaIndex_0", + "sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "type": "buttonedge", + "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-contextChatEngine_0-contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "data": { + "label": "" + } + }, + { + "source": "RedisBackedChatMemory_0", + "sourceHandle": 
"RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "RedisBackedChatMemory_0-RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory-contextChatEngine_0-contextChatEngine_0-input-memory-BaseChatMemory", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_LlamaIndex_2", + "sourceHandle": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_2-chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-contextChatEngine_0-contextChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/marketplaces/chatflows/Query Engine.json b/packages/server/marketplaces/chatflows/Query Engine.json new file mode 100644 index 000000000..825533339 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Query Engine.json @@ -0,0 +1,547 @@ +{ + "description": "Stateless query engine designed to answer question over your data using LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 382, + "id": "queryEngine_0", + "position": { + "x": 1407.9610494306783, + "y": 241.12144405808692 + }, + "type": "customNode", + "data": { + "id": "queryEngine_0", + "label": "Query Engine", + "version": 1, + "name": "queryEngine", + "type": "QueryEngine", + "baseClasses": ["QueryEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Simple query engine built to answer question over your data, without memory", + "inputParams": [ + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "queryEngine_0-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "Vector Store Retriever", + "name": "vectorStoreRetriever", + "type": "VectorIndexRetriever", + "id": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever" + }, + { + "label": "Response Synthesizer", + "name": "responseSynthesizer", + "type": "ResponseSynthesizer", + "description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. 
See more", + "optional": true, + "id": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer" + } + ], + "inputs": { + "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}", + "responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "queryEngine_0-output-queryEngine-QueryEngine", + "name": "queryEngine", + "label": "QueryEngine", + "type": "QueryEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1407.9610494306783, + "y": 241.12144405808692 + }, + "dragging": false + }, + { + "width": 300, + "height": 585, + "id": "pineconeLlamaIndex_0", + "position": { + "x": 977.3886641397302, + "y": -261.2253031641797 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_0", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. 
Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": "", + "model": "{{chatAnthropic_LlamaIndex_0.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "", + "pineconeNamespace": "", + "pineconeMetadataFilter": "", + "topK": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "retriever" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 977.3886641397302, + "y": -261.2253031641797 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_0", + "position": { + "x": 529.8690713844503, + "y": -18.955726653613254 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_0", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbedding_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": 
"OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 529.8690713844503, + "y": -18.955726653613254 + }, + "dragging": false + }, + { + "width": 300, + "height": 749, + "id": "compactrefineLlamaIndex_0", + "position": { + "x": 170.71031618977543, + "y": -33.83233752386292 + }, + "type": "customNode", + "data": { + "id": "compactrefineLlamaIndex_0", + "label": "Compact and Refine", + "version": 1, + "name": "compactrefineLlamaIndex", + "type": "CompactRefine", + "baseClasses": ["CompactRefine", "ResponseSynthesizer"], + "tags": ["LlamaIndex"], + "category": "Response Synthesizer", + "description": "CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.", + "inputParams": [ + { + "label": "Refine Prompt", + "name": "refinePrompt", + "type": "string", + "rows": 4, + "default": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:", + "warning": "Prompt can contains no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-refinePrompt-string" + }, + { + "label": "Text QA Prompt", + "name": "textQAPrompt", + "type": "string", + "rows": 4, + "default": "Context information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:", + "warning": "Prompt can contains no variables, or up to 2 variables. Variables must be {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-textQAPrompt-string" + } + ], + "inputAnchors": [], + "inputs": { + "refinePrompt": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. 
If the context isn't useful, return the original answer.\nRefined Answer:", + "textQAPrompt": "Context information:\n\n{context}\n\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}" + }, + "outputAnchors": [ + { + "id": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "name": "compactrefineLlamaIndex", + "label": "CompactRefine", + "type": "CompactRefine | ResponseSynthesizer" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 170.71031618977543, + "y": -33.83233752386292 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatAnthropic_LlamaIndex_0", + "position": { + "x": 521.3530883359147, + "y": -584.8241219614786 + }, + "type": "customNode", + "data": { + "id": "chatAnthropic_LlamaIndex_0", + "label": "ChatAnthropic", + "version": 1, + "name": "chatAnthropic_LlamaIndex", + "type": "ChatAnthropic", + "baseClasses": ["ChatAnthropic", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around ChatAnthropic LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["anthropicApi"], + "id": "chatAnthropic_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "claude-2", + "name": "claude-2", + "description": "Claude 2 latest major version, automatically get updates to the model as they are released" + }, + { + "label": "claude-2.1", + "name": "claude-2.1", + "description": "Claude 2 latest full version" + }, + { + "label": "claude-instant-1", + "name": "claude-instant-1", + "description": "Claude Instant latest major version, automatically get updates to the model as they are released" + }, + { + "label": "claude-v1", + "name": "claude-v1" + }, + { + "label": "claude-v1-100k", + "name": "claude-v1-100k" + }, + { + "label": "claude-v1.0", + "name": "claude-v1.0" + }, + { + "label": "claude-v1.2", + "name": "claude-v1.2" + }, + { + "label": "claude-v1.3", + "name": "claude-v1.3" + }, + { + "label": "claude-v1.3-100k", + "name": "claude-v1.3-100k" + }, + { + "label": "claude-instant-v1", + "name": "claude-instant-v1" + }, + { + "label": "claude-instant-v1-100k", + "name": "claude-instant-v1-100k" + }, + { + "label": "claude-instant-v1.0", + "name": "claude-instant-v1.0" + }, + { + "label": "claude-instant-v1.1", + "name": "claude-instant-v1.1" + }, + { + "label": "claude-instant-v1.1-100k", + "name": "claude-instant-v1.1-100k" + } + ], + "default": "claude-2", + "optional": true, + "id": "chatAnthropic_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatAnthropic_LlamaIndex_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokensToSample", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_LlamaIndex_0-input-maxTokensToSample-number" + }, + { + "label": "Top P", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_LlamaIndex_0-input-topP-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "claude-2", + "temperature": 0.9, + "maxTokensToSample": "", + "topP": "" + }, + "outputAnchors": [ + { + 
"id": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex", + "name": "chatAnthropic_LlamaIndex", + "label": "ChatAnthropic", + "type": "ChatAnthropic | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 521.3530883359147, + "y": -584.8241219614786 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "pineconeLlamaIndex_0", + "sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "target": "queryEngine_0", + "targetHandle": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "type": "buttonedge", + "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-queryEngine_0-queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "data": { + "label": "" + } + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "data": { + "label": "" + } + }, + { + "source": "compactrefineLlamaIndex_0", + "sourceHandle": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "target": "queryEngine_0", + "targetHandle": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer", + "type": "buttonedge", + "id": "compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-queryEngine_0-queryEngine_0-input-responseSynthesizer-ResponseSynthesizer", + "data": { + "label": "" + } + }, + { + "source": "chatAnthropic_LlamaIndex_0", + "sourceHandle": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatAnthropic_LlamaIndex_0-chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/marketplaces/chatflows/Simple Chat Engine.json b/packages/server/marketplaces/chatflows/Simple Chat Engine.json new file mode 100644 index 000000000..630b68335 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Simple Chat Engine.json @@ -0,0 +1,270 @@ +{ + "description": "Simple chat engine to handle back and forth conversations using LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 462, + "id": "simpleChatEngine_0", + "position": { + "x": 1210.127368000538, + "y": 324.98110560103896 + }, + "type": "customNode", + "data": { + "id": "simpleChatEngine_0", + "label": "Simple Chat Engine", + "version": 1, + "name": "simpleChatEngine", + "type": "SimpleChatEngine", + "baseClasses": ["SimpleChatEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Simple engine to handle back and forth conversations", + "inputParams": [ + { + "label": "System 
Message", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "placeholder": "You are a helpful assistant", + "id": "simpleChatEngine_0-input-systemMessagePrompt-string" + } + ], + "inputAnchors": [ + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "simpleChatEngine_0-input-memory-BaseChatMemory" + } + ], + "inputs": { + "model": "{{azureChatOpenAI_LlamaIndex_0.data.instance}}", + "memory": "{{bufferMemory_0.data.instance}}", + "systemMessagePrompt": "You are a helpful assistant." + }, + "outputAnchors": [ + { + "id": "simpleChatEngine_0-output-simpleChatEngine-SimpleChatEngine", + "name": "simpleChatEngine", + "label": "SimpleChatEngine", + "type": "SimpleChatEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 1210.127368000538, + "y": 324.98110560103896 + } + }, + { + "width": 300, + "height": 376, + "id": "bufferMemory_0", + "position": { + "x": 393.9823478014782, + "y": 415.7414943210391 + }, + "type": "customNode", + "data": { + "id": "bufferMemory_0", + "label": "Buffer Memory", + "version": 1, + "name": "bufferMemory", + "type": "BufferMemory", + "baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"], + "category": "Memory", + "description": "Remembers previous conversational back and forths directly", + "inputParams": [ + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "id": "bufferMemory_0-input-memoryKey-string" + }, + { + "label": "Input Key", + "name": "inputKey", + "type": "string", + "default": "input", + "id": "bufferMemory_0-input-inputKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "memoryKey": "chat_history", + "inputKey": "input" + }, + "outputAnchors": [ + { + "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "name": "bufferMemory", + "label": "BufferMemory", + "type": "BufferMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 393.9823478014782, + "y": 415.7414943210391 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "azureChatOpenAI_LlamaIndex_0", + "position": { + "x": 746.5530862509605, + "y": -54.107978373323306 + }, + "type": "customNode", + "data": { + "id": "azureChatOpenAI_LlamaIndex_0", + "label": "AzureChatOpenAI", + "version": 1, + "name": "azureChatOpenAI_LlamaIndex", + "type": "AzureChatOpenAI", + "baseClasses": ["AzureChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around Azure OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["azureOpenAIApi"], + "id": "azureChatOpenAI_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + } + ], + "default": "gpt-3.5-turbo-16k", + "optional": true, + "id": 
"azureChatOpenAI_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex", + "name": "azureChatOpenAI_LlamaIndex", + "label": "AzureChatOpenAI", + "type": "AzureChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 746.5530862509605, + "y": -54.107978373323306 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "bufferMemory_0", + "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "target": "simpleChatEngine_0", + "targetHandle": "simpleChatEngine_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-simpleChatEngine_0-simpleChatEngine_0-input-memory-BaseChatMemory", + "data": { + "label": "" + } + }, + { + "source": "azureChatOpenAI_LlamaIndex_0", + "sourceHandle": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex", + "target": "simpleChatEngine_0", + "targetHandle": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "azureChatOpenAI_LlamaIndex_0-azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex-simpleChatEngine_0-simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 045e40dd7..dbb5717df 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -482,7 +482,12 @@ export class App { const isEndingNode = endingNodeData?.outputs?.output === 'EndingNode' if (!isEndingNode) { - if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents') { + if ( + endingNodeData && + endingNodeData.category !== 'Chains' && + endingNodeData.category !== 'Agents' && + endingNodeData.category !== 'Engine' + ) { return res.status(500).send(`Ending node must be either a Chain or Agent`) } } @@ -1690,7 +1695,12 @@ export class App { const isEndingNode = endingNodeData?.outputs?.output === 'EndingNode' if (!isEndingNode) { - if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents') { + if ( + endingNodeData && + endingNodeData.category !== 'Chains' && + endingNodeData.category !== 'Agents' && + endingNodeData.category !== 'Engine' + ) { return 
res.status(500).send(`Ending node must be either a Chain or Agent`) } diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index 2d3f000ac..421a15ece 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -818,7 +818,16 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component */ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => { const streamAvailableLLMs = { - 'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock', 'chatMistralAI'], + 'Chat Models': [ + 'azureChatOpenAI', + 'chatOpenAI', + 'chatOpenAI_LlamaIndex', + 'chatAnthropic', + 'chatAnthropic_LlamaIndex', + 'chatOllama', + 'awsChatBedrock', + 'chatMistralAI' + ], LLMs: ['azureOpenAI', 'openAI', 'ollama'] } @@ -841,6 +850,9 @@ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNod // Agent that are available to stream const whitelistAgents = ['openAIFunctionAgent', 'csvAgent', 'airtableAgent', 'conversationalRetrievalAgent'] isValidChainOrAgent = whitelistAgents.includes(endingNodeData.name) + } else if (endingNodeData.category === 'Engine') { + const whitelistEngine = ['contextChatEngine', 'simpleChatEngine', 'queryEngine', 'subQuestionQueryEngine'] + isValidChainOrAgent = whitelistEngine.includes(endingNodeData.name) } // If no output parser, flow is available to stream diff --git a/packages/ui/src/assets/images/llamaindex.png b/packages/ui/src/assets/images/llamaindex.png new file mode 100644 index 000000000..139c33eb0 Binary files /dev/null and b/packages/ui/src/assets/images/llamaindex.png differ diff --git a/packages/ui/src/ui-component/dialog/NodeInfoDialog.js b/packages/ui/src/ui-component/dialog/NodeInfoDialog.js index 6f3bec5de..a5dbd4113 100644 --- a/packages/ui/src/ui-component/dialog/NodeInfoDialog.js +++ b/packages/ui/src/ui-component/dialog/NodeInfoDialog.js @@ -132,6 +132,35 @@ const NodeInfoDialog = ({ show, dialogProps, onCancel }) => { )} + {dialogProps.data.tags && + dialogProps.data.tags.length && + dialogProps.data.tags.map((tag, index) => ( +
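// --- Sketch (not part of the diff): how the two server-side changes above fit together. ---
// 1) packages/server/src/index.ts now also accepts nodes in the 'Engine' category as valid
//    ending nodes (previously only 'Chains' and 'Agents').
// 2) packages/server/src/utils/index.ts whitelists the LlamaIndex chat models for streaming and
//    adds an 'Engine' branch so that only a fixed set of engines may stream.
// The helper names and the minimal node shape below are assumptions for illustration; the real
// logic lives inline in the files shown above.
interface EndingNodeLike {
    name: string
    category: string
    outputs?: { output?: string }
}

const isValidEndingNode = (node?: EndingNodeLike): boolean => {
    // Nodes explicitly flagged as 'EndingNode' outputs are always accepted.
    if (!node) return true
    if (node.outputs?.output === 'EndingNode') return true
    // Otherwise the ending node must belong to one of the whitelisted categories.
    return ['Chains', 'Agents', 'Engine'].includes(node.category)
}

const streamableEngines = ['contextChatEngine', 'simpleChatEngine', 'queryEngine', 'subQuestionQueryEngine']

const isEngineStreamable = (node: EndingNodeLike): boolean =>
    node.category === 'Engine' && streamableEngines.includes(node.name)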
+ + {tag.toLowerCase()} + +
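// Sketch of the data the new NodeInfoDialog block renders: every entry in data.tags becomes a
// small lower-cased badge (the exact badge markup is omitted here). Note that in JSX a bare
// `tags.length &&` guard renders a literal 0 for an empty array; `tags.length > 0` avoids that.
// The helper below only illustrates the mapping, it is not the dialog code itself.
const tagBadgeLabels = (tags?: string[]): string[] => (tags && tags.length > 0 ? tags.map((tag) => tag.toLowerCase()) : [])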
+ ))} diff --git a/packages/ui/src/utils/genericHelper.js b/packages/ui/src/utils/genericHelper.js index 57ba88926..eadbdb881 100644 --- a/packages/ui/src/utils/genericHelper.js +++ b/packages/ui/src/utils/genericHelper.js @@ -277,6 +277,7 @@ export const generateExportFlowData = (flowData) => { name: node.data.name, type: node.data.type, baseClasses: node.data.baseClasses, + tags: node.data.tags, category: node.data.category, description: node.data.description, inputParams: node.data.inputParams, diff --git a/packages/ui/src/views/canvas/AddNodes.js b/packages/ui/src/views/canvas/AddNodes.js index 7bf3e7ff0..61db1716e 100644 --- a/packages/ui/src/views/canvas/AddNodes.js +++ b/packages/ui/src/views/canvas/AddNodes.js @@ -22,7 +22,9 @@ import { Popper, Stack, Typography, - Chip + Chip, + Tab, + Tabs } from '@mui/material' import ExpandMoreIcon from '@mui/icons-material/ExpandMore' @@ -36,12 +38,20 @@ import { StyledFab } from 'ui-component/button/StyledFab' // icons import { IconPlus, IconSearch, IconMinus, IconX } from '@tabler/icons' +import LlamaindexPNG from 'assets/images/llamaindex.png' +import LangChainPNG from 'assets/images/langchain.png' // const import { baseURL } from 'store/constant' import { SET_COMPONENT_NODES } from 'store/actions' // ==============================|| ADD NODES||============================== // +function a11yProps(index) { + return { + id: `attachment-tab-${index}`, + 'aria-controls': `attachment-tabpanel-${index}` + } +} const AddNodes = ({ nodesData, node }) => { const theme = useTheme() @@ -52,6 +62,7 @@ const AddNodes = ({ nodesData, node }) => { const [nodes, setNodes] = useState({}) const [open, setOpen] = useState(false) const [categoryExpanded, setCategoryExpanded] = useState({}) + const [tabValue, setTabValue] = useState(0) const anchorRef = useRef(null) const prevOpen = useRef(open) @@ -86,6 +97,11 @@ const AddNodes = ({ nodesData, node }) => { } } + const handleTabChange = (event, newValue) => { + setTabValue(newValue) + filterSearch(searchValue, newValue) + } + const getSearchedNodes = (value) => { const passed = nodesData.filter((nd) => { const passesQuery = nd.name.toLowerCase().includes(value.toLowerCase()) @@ -95,23 +111,34 @@ const AddNodes = ({ nodesData, node }) => { return passed } - const filterSearch = (value) => { + const filterSearch = (value, newTabValue) => { setSearchValue(value) setTimeout(() => { if (value) { const returnData = getSearchedNodes(value) - groupByCategory(returnData, true) + groupByCategory(returnData, newTabValue ?? tabValue, true) scrollTop() } else if (value === '') { - groupByCategory(nodesData) + groupByCategory(nodesData, newTabValue ?? tabValue) scrollTop() } }, 500) } - const groupByCategory = (nodes, isFilter) => { + const groupByTags = (nodes, newTabValue = 0) => { + const langchainNodes = nodes.filter((nd) => !nd.tags) + const llmaindexNodes = nodes.filter((nd) => nd.tags && nd.tags.includes('LlamaIndex')) + if (newTabValue === 0) { + return langchainNodes + } else { + return llmaindexNodes + } + } + + const groupByCategory = (nodes, newTabValue, isFilter) => { + const taggedNodes = groupByTags(nodes, newTabValue) const accordianCategories = {} - const result = nodes.reduce(function (r, a) { + const result = taggedNodes.reduce(function (r, a) { r[a.category] = r[a.category] || [] r[a.category].push(a) accordianCategories[a.category] = isFilter ? 
true : false @@ -244,15 +271,72 @@ const AddNodes = ({ nodesData, node }) => { 'aria-label': 'weight' }} /> + + {['LangChain', 'LlamaIndex'].map((item, index) => ( + + {item} + + } + iconPosition='start' + sx={{ minHeight: '50px', height: '50px' }} + key={index} + label={item} + {...a11yProps(index)} + > + ))} +
+ BETA +
+
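// Sketch of the filtering the LangChain / LlamaIndex tabs above drive, mirroring the
// groupByTags helper added earlier in this file: tab 0 lists untagged (LangChain) nodes,
// tab 1 lists nodes tagged 'LlamaIndex'. Exported flows now also carry node.data.tags
// (see the genericHelper.js hunk above), so imported flows keep this grouping.
// NodeLike is an assumed minimal shape for illustration only.
interface NodeLike {
    name: string
    category: string
    tags?: string[]
}

const nodesForTab = (nodes: NodeLike[], tabValue: number): NodeLike[] =>
    tabValue === 0 ? nodes.filter((nd) => !nd.tags) : nodes.filter((nd) => nd.tags?.includes('LlamaIndex') === true)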
+ { ps.current = el }} - style={{ height: '100%', maxHeight: 'calc(100vh - 320px)', overflowX: 'hidden' }} + style={{ height: '100%', maxHeight: 'calc(100vh - 380px)', overflowX: 'hidden' }} > - + { {data.label} +
+ {data.tags && data.tags.includes('LlamaIndex') && ( + <> +
+ LlamaIndex +
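// Sketch of the condition the canvas uses to decorate a node with the LlamaIndex marker shown
// above (the same check reappears in MarketplaceCanvasNode.js further down): only nodes whose
// tags include 'LlamaIndex' get the llamaindex.png badge. The predicate below is illustrative,
// not the component code.
const showLlamaIndexBadge = (data: { tags?: string[] }): boolean => Boolean(data.tags && data.tags.includes('LlamaIndex'))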
+ + )} {warningMessage && ( <> -
{warningMessage}} placement='top'> diff --git a/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js b/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js index 8ec5ada30..44cb75e8d 100644 --- a/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js +++ b/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js @@ -13,6 +13,7 @@ import AdditionalParamsDialog from 'ui-component/dialog/AdditionalParamsDialog' // const import { baseURL } from 'store/constant' +import LlamaindexPNG from 'assets/images/llamaindex.png' const CardWrapper = styled(MainCard)(({ theme }) => ({ background: theme.palette.card.main, @@ -87,6 +88,23 @@ const MarketplaceCanvasNode = ({ data }) => { {data.label}
+
+ {data.tags && data.tags.includes('LlamaIndex') && ( + <> +
+ LlamaIndex +
+ + )} {(data.inputAnchors.length > 0 || data.inputParams.length > 0) && ( <>