Merge branch 'main' into chore/Upgrade-LC-version
# Conflicts:
#	packages/components/package.json

@@ -3,7 +3,7 @@ on:
    pull_request_target:
        types:
            - closed
-        branches: [ "main" ]
+        branches: ['main']
jobs:
    autoSyncMergedPullRequest:
        if: github.event.pull_request.merged == true

@@ -1,6 +1,6 @@
{
    "name": "flowise",
-    "version": "1.4.11",
+    "version": "1.5.0",
    "private": true,
    "homepage": "https://flowiseai.com",
    "workspaces": [

@@ -0,0 +1,135 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex'

interface AzureOpenAIConfig {
    apiKey?: string
    endpoint?: string
    apiVersion?: string
    deploymentName?: string
}

class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    tags: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'AzureChatOpenAI'
        this.name = 'azureChatOpenAI_LlamaIndex'
        this.version = 1.0
        this.type = 'AzureChatOpenAI'
        this.icon = 'Azure.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around Azure OpenAI Chat LLM specific for LlamaIndex'
        this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['azureOpenAIApi']
        }
        this.inputs = [
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'gpt-4',
                        name: 'gpt-4'
                    },
                    {
                        label: 'gpt-4-32k',
                        name: 'gpt-4-32k'
                    },
                    {
                        label: 'gpt-3.5-turbo',
                        name: 'gpt-3.5-turbo'
                    },
                    {
                        label: 'gpt-3.5-turbo-16k',
                        name: 'gpt-3.5-turbo-16k'
                    }
                ],
                default: 'gpt-3.5-turbo-16k',
                optional: true
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            },
            {
                label: 'Max Tokens',
                name: 'maxTokens',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top Probability',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Timeout',
                name: 'timeout',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS
        const temperature = nodeData.inputs?.temperature as string
        const maxTokens = nodeData.inputs?.maxTokens as string
        const topP = nodeData.inputs?.topP as string
        const timeout = nodeData.inputs?.timeout as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
        const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData)
        const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
        const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)

        const obj: Partial<OpenAI> & { azure?: AzureOpenAIConfig } = {
            temperature: parseFloat(temperature),
            model: modelName,
            azure: {
                apiKey: azureOpenAIApiKey,
                endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`,
                apiVersion: azureOpenAIApiVersion,
                deploymentName: azureOpenAIApiDeploymentName
            }
        }

        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
        if (topP) obj.topP = parseFloat(topP)
        if (timeout) obj.timeout = parseInt(timeout, 10)

        const model = new OpenAI(obj)
        return model
    }
}

module.exports = { nodeClass: AzureChatOpenAI_LlamaIndex_ChatModels }
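
For orientation, the node's init boils down to handing LlamaIndex's OpenAI class an azure config block assembled from the stored credential. A minimal sketch of the equivalent direct call; the credential values below are placeholders, not values from the diff:

    import { OpenAI } from 'llamaindex'

    // Placeholders stand in for what getCredentialParam would return
    const llm = new OpenAI({
        model: 'gpt-3.5-turbo-16k',
        temperature: 0.9,
        azure: {
            apiKey: '<azure-openai-api-key>',
            endpoint: 'https://<instance-name>.openai.azure.com',
            apiVersion: '<api-version>', // whatever version is stored in the credential
            deploymentName: '<deployment-name>'
        }
    })
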
@@ -0,0 +1,104 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Anthropic } from 'llamaindex'

class ChatAnthropic_LlamaIndex_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    tags: string[]
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatAnthropic'
        this.name = 'chatAnthropic_LlamaIndex'
        this.version = 1.0
        this.type = 'ChatAnthropic'
        this.icon = 'Anthropic.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around ChatAnthropic LLM specific for LlamaIndex'
        this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(Anthropic)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['anthropicApi']
        }
        this.inputs = [
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'claude-2',
                        name: 'claude-2',
                        description: 'Claude 2 latest major version, automatically gets updates to the model as they are released'
                    },
                    {
                        label: 'claude-instant-1',
                        name: 'claude-instant-1',
                        description: 'Claude Instant latest major version, automatically gets updates to the model as they are released'
                    }
                ],
                default: 'claude-2',
                optional: true
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            },
            {
                label: 'Max Tokens',
                name: 'maxTokensToSample',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top P',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as 'claude-2' | 'claude-instant-1' | undefined
        const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string
        const topP = nodeData.inputs?.topP as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData)

        const obj: Partial<Anthropic> = {
            temperature: parseFloat(temperature),
            model: modelName,
            apiKey: anthropicApiKey
        }

        if (maxTokensToSample) obj.maxTokens = parseInt(maxTokensToSample, 10)
        if (topP) obj.topP = parseFloat(topP)

        const model = new Anthropic(obj)
        return model
    }
}

module.exports = { nodeClass: ChatAnthropic_LlamaIndex_ChatModels }
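
All of these chat model nodes share one detail worth noting: Flowise UI inputs arrive as strings, so init parses them and only sets the optional fields that were actually filled in. A minimal sketch of that pattern, with a placeholder API key:

    import { Anthropic } from 'llamaindex'

    const inputs = { temperature: '0.9', maxTokensToSample: '1024', topP: '' } // as the UI delivers them

    const obj: Partial<Anthropic> = {
        temperature: parseFloat(inputs.temperature),
        model: 'claude-2',
        apiKey: '<anthropic-api-key>' // placeholder
    }
    if (inputs.maxTokensToSample) obj.maxTokens = parseInt(inputs.maxTokensToSample, 10)
    if (inputs.topP) obj.topP = parseFloat(inputs.topP) // skipped here: '' is falsy

    const model = new Anthropic(obj)
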
@@ -79,6 +79,10 @@ class ChatOpenAI_ChatModels implements INode {
                        label: 'gpt-3.5-turbo',
                        name: 'gpt-3.5-turbo'
                    },
+                    {
+                        label: 'gpt-3.5-turbo-0125',
+                        name: 'gpt-3.5-turbo-0125'
+                    },
                    {
                        label: 'gpt-3.5-turbo-1106',
                        name: 'gpt-3.5-turbo-1106'

@@ -0,0 +1,156 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex'

class ChatOpenAI_LlamaIndex_LLMs implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    tags: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatOpenAI'
        this.name = 'chatOpenAI_LlamaIndex'
        this.version = 1.0
        this.type = 'ChatOpenAI'
        this.icon = 'openai.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around OpenAI Chat LLM specific for LlamaIndex'
        this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['openAIApi']
        }
        this.inputs = [
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'gpt-4',
                        name: 'gpt-4'
                    },
                    {
                        label: 'gpt-4-turbo-preview',
                        name: 'gpt-4-turbo-preview'
                    },
                    {
                        label: 'gpt-4-0125-preview',
                        name: 'gpt-4-0125-preview'
                    },
                    {
                        label: 'gpt-4-1106-preview',
                        name: 'gpt-4-1106-preview'
                    },
                    {
                        label: 'gpt-4-vision-preview',
                        name: 'gpt-4-vision-preview'
                    },
                    {
                        label: 'gpt-4-0613',
                        name: 'gpt-4-0613'
                    },
                    {
                        label: 'gpt-4-32k',
                        name: 'gpt-4-32k'
                    },
                    {
                        label: 'gpt-4-32k-0613',
                        name: 'gpt-4-32k-0613'
                    },
                    {
                        label: 'gpt-3.5-turbo',
                        name: 'gpt-3.5-turbo'
                    },
                    {
                        label: 'gpt-3.5-turbo-1106',
                        name: 'gpt-3.5-turbo-1106'
                    },
                    {
                        label: 'gpt-3.5-turbo-0613',
                        name: 'gpt-3.5-turbo-0613'
                    },
                    {
                        label: 'gpt-3.5-turbo-16k',
                        name: 'gpt-3.5-turbo-16k'
                    },
                    {
                        label: 'gpt-3.5-turbo-16k-0613',
                        name: 'gpt-3.5-turbo-16k-0613'
                    }
                ],
                default: 'gpt-3.5-turbo',
                optional: true
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            },
            {
                label: 'Max Tokens',
                name: 'maxTokens',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top Probability',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Timeout',
                name: 'timeout',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS
        const maxTokens = nodeData.inputs?.maxTokens as string
        const topP = nodeData.inputs?.topP as string
        const timeout = nodeData.inputs?.timeout as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

        const obj: Partial<OpenAI> = {
            temperature: parseFloat(temperature),
            model: modelName,
            apiKey: openAIApiKey
        }

        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
        if (topP) obj.topP = parseFloat(topP)
        if (timeout) obj.timeout = parseInt(timeout, 10)

        const model = new OpenAI(obj)
        return model
    }
}

module.exports = { nodeClass: ChatOpenAI_LlamaIndex_LLMs }

@@ -34,6 +34,12 @@ class Folder_DocumentLoaders implements INode {
                type: 'string',
                placeholder: ''
            },
+            {
+                label: 'Recursive',
+                name: 'recursive',
+                type: 'boolean',
+                additionalParams: false
+            },
            {
                label: 'Text Splitter',
                name: 'textSplitter',

@@ -54,8 +60,11 @@ class Folder_DocumentLoaders implements INode {
        const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
        const folderPath = nodeData.inputs?.folderPath as string
        const metadata = nodeData.inputs?.metadata
+        const recursive = nodeData.inputs?.recursive as boolean

-        const loader = new DirectoryLoader(folderPath, {
+        const loader = new DirectoryLoader(
+            folderPath,
+            {
                '.json': (path) => new JSONLoader(path),
                '.txt': (path) => new TextLoader(path),
                '.csv': (path) => new CSVLoader(path),

@@ -70,6 +79,7 @@ class Folder_DocumentLoaders implements INode {
                '.css': (path) => new TextLoader(path),
                '.go': (path) => new TextLoader(path), // Go
                '.h': (path) => new TextLoader(path), // C++ Header files
+                '.kt': (path) => new TextLoader(path), // Kotlin
                '.java': (path) => new TextLoader(path), // Java
                '.js': (path) => new TextLoader(path), // JavaScript
                '.less': (path) => new TextLoader(path), // Less files

@@ -95,7 +105,9 @@ class Folder_DocumentLoaders implements INode {
                '.html': (path) => new TextLoader(path), // HTML
                '.vb': (path) => new TextLoader(path), // Visual Basic
                '.xml': (path) => new TextLoader(path) // XML
-        })
+            },
+            recursive
+        )
        let docs = []

        if (textSplitter) {
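
The change above threads the new recursive flag into LangChain's DirectoryLoader, whose third constructor argument controls whether subfolders are walked. A minimal standalone sketch, assuming the standard langchain import paths and a hypothetical folder path:

    import { DirectoryLoader } from 'langchain/document_loaders/fs/directory'
    import { TextLoader } from 'langchain/document_loaders/fs/text'

    // recursive = false restricts loading to the top level of the folder
    const loader = new DirectoryLoader(
        '/path/to/folder', // hypothetical
        { '.txt': (path) => new TextLoader(path) },
        false
    )
    const docs = await loader.load()
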

@@ -51,7 +51,7 @@ class VectorStoreToDocument_DocumentLoaders implements INode {
            {
                label: 'Document',
                name: 'document',
-                baseClasses: this.baseClasses
+                baseClasses: [...this.baseClasses, 'json']
            },
            {
                label: 'Text',

@@ -0,0 +1,77 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAIEmbedding } from 'llamaindex'

interface AzureOpenAIConfig {
    apiKey?: string
    endpoint?: string
    apiVersion?: string
    deploymentName?: string
}

class AzureOpenAIEmbedding_LlamaIndex_Embeddings implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    tags: string[]
    inputs: INodeParams[]

    constructor() {
        this.label = 'Azure OpenAI Embeddings'
        this.name = 'azureOpenAIEmbeddingsLlamaIndex'
        this.version = 1.0
        this.type = 'AzureOpenAIEmbeddings'
        this.icon = 'Azure.svg'
        this.category = 'Embeddings'
        this.description = 'Azure OpenAI API embeddings specific for LlamaIndex'
        this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['azureOpenAIApi']
        }
        this.inputs = [
            {
                label: 'Timeout',
                name: 'timeout',
                type: 'number',
                optional: true,
                additionalParams: true
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const timeout = nodeData.inputs?.timeout as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
        const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData)
        const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
        const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)

        const obj: Partial<OpenAIEmbedding> & { azure?: AzureOpenAIConfig } = {
            azure: {
                apiKey: azureOpenAIApiKey,
                endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`,
                apiVersion: azureOpenAIApiVersion,
                deploymentName: azureOpenAIApiDeploymentName
            }
        }

        if (timeout) obj.timeout = parseInt(timeout, 10)

        const model = new OpenAIEmbedding(obj)
        return model
    }
}

module.exports = { nodeClass: AzureOpenAIEmbedding_LlamaIndex_Embeddings }

@@ -0,0 +1,91 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAIEmbedding } from 'llamaindex'

class OpenAIEmbedding_LlamaIndex_Embeddings implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    tags: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'OpenAI Embedding'
        this.name = 'openAIEmbedding_LlamaIndex'
        this.version = 1.0
        this.type = 'OpenAIEmbedding'
        this.icon = 'openai.svg'
        this.category = 'Embeddings'
        this.description = 'OpenAI Embedding specific for LlamaIndex'
        this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['openAIApi']
        }
        this.inputs = [
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'text-embedding-3-large',
                        name: 'text-embedding-3-large'
                    },
                    {
                        label: 'text-embedding-3-small',
                        name: 'text-embedding-3-small'
                    },
                    {
                        label: 'text-embedding-ada-002',
                        name: 'text-embedding-ada-002'
                    }
                ],
                default: 'text-embedding-ada-002',
                optional: true
            },
            {
                label: 'Timeout',
                name: 'timeout',
                type: 'number',
                optional: true,
                additionalParams: true
            },
            {
                label: 'BasePath',
                name: 'basepath',
                type: 'string',
                optional: true,
                additionalParams: true
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const timeout = nodeData.inputs?.timeout as string
        const modelName = nodeData.inputs?.modelName as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

        const obj: Partial<OpenAIEmbedding> = {
            apiKey: openAIApiKey,
            model: modelName
        }
        if (timeout) obj.timeout = parseInt(timeout, 10)

        const model = new OpenAIEmbedding(obj)
        return model
    }
}

module.exports = { nodeClass: OpenAIEmbedding_LlamaIndex_Embeddings }
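
The object this node returns is a plain LlamaIndex OpenAIEmbedding, so downstream nodes can embed text directly. A minimal sketch with a placeholder key, assuming BaseEmbedding's getTextEmbedding API:

    import { OpenAIEmbedding } from 'llamaindex'

    const embedder = new OpenAIEmbedding({
        apiKey: '<openai-api-key>', // placeholder
        model: 'text-embedding-3-small'
    })
    // Returns the embedding vector as number[]
    const vector = await embedder.getTextEmbedding('hello world')
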

@@ -0,0 +1,149 @@
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { BaseNode, Metadata, BaseRetriever, LLM, ContextChatEngine, ChatMessage } from 'llamaindex'
import { reformatSourceDocuments } from '../EngineUtils'

class ContextChatEngine_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]
    sessionId?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'Context Chat Engine'
        this.name = 'contextChatEngine'
        this.version = 1.0
        this.type = 'ContextChatEngine'
        this.icon = 'context-chat-engine.png'
        this.category = 'Engine'
        this.description = 'Answer questions based on retrieved documents (context), with built-in memory to remember the conversation'
        this.baseClasses = [this.type]
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Chat Model',
                name: 'model',
                type: 'BaseChatModel_LlamaIndex'
            },
            {
                label: 'Vector Store Retriever',
                name: 'vectorStoreRetriever',
                type: 'VectorIndexRetriever'
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseChatMemory'
            },
            {
                label: 'Return Source Documents',
                name: 'returnSourceDocuments',
                type: 'boolean',
                optional: true
            },
            {
                label: 'System Message',
                name: 'systemMessagePrompt',
                type: 'string',
                rows: 4,
                optional: true,
                placeholder:
                    'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.'
            }
        ]
        this.sessionId = fields?.sessionId
    }

    async init(): Promise<any> {
        return null
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
        const model = nodeData.inputs?.model as LLM
        const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever
        const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
        const memory = nodeData.inputs?.memory as FlowiseMemory
        const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean

        const chatHistory = [] as ChatMessage[]

        if (systemMessagePrompt) {
            chatHistory.push({
                content: systemMessagePrompt,
                role: 'user'
            })
        }

        const chatEngine = new ContextChatEngine({ chatModel: model, retriever: vectorStoreRetriever })

        const msgs = (await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]
        for (const message of msgs) {
            if (message.type === 'apiMessage') {
                chatHistory.push({
                    content: message.message,
                    role: 'assistant'
                })
            } else if (message.type === 'userMessage') {
                chatHistory.push({
                    content: message.message,
                    role: 'user'
                })
            }
        }

        let text = ''
        let isStreamingStarted = false
        let sourceDocuments: ICommonObject[] = []
        let sourceNodes: BaseNode<Metadata>[] = []
        const isStreamingEnabled = options.socketIO && options.socketIOClientId

        if (isStreamingEnabled) {
            const stream = await chatEngine.chat({ message: input, chatHistory, stream: true })
            for await (const chunk of stream) {
                text += chunk.response
                if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes
                if (!isStreamingStarted) {
                    isStreamingStarted = true
                    options.socketIO.to(options.socketIOClientId).emit('start', chunk.response)
                }

                options.socketIO.to(options.socketIOClientId).emit('token', chunk.response)
            }

            if (returnSourceDocuments) {
                sourceDocuments = reformatSourceDocuments(sourceNodes)
                options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments)
            }
        } else {
            const response = await chatEngine.chat({ message: input, chatHistory })
            text = response?.response
            sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? [])
        }

        await memory.addChatMessages(
            [
                {
                    text: input,
                    type: 'userMessage'
                },
                {
                    text: text,
                    type: 'apiMessage'
                }
            ],
            this.sessionId
        )

        if (returnSourceDocuments) return { text, sourceDocuments }
        else return { text }
    }
}

module.exports = { nodeClass: ContextChatEngine_LlamaIndex }
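
The interesting part of run() is the history translation: Flowise stores messages as { message, type }, while LlamaIndex expects { content, role }, and the system prompt is pushed as a plain user message. A condensed sketch of the non-streaming path, with the model and retriever treated as given (the sample messages are hypothetical):

    import { ContextChatEngine, ChatMessage } from 'llamaindex'

    declare const model: any // output of a BaseChatModel_LlamaIndex node
    declare const retriever: any // output of the connected vector store node

    const chatHistory: ChatMessage[] = [
        { role: 'user', content: 'You are a document assistant.' }, // system prompt, pushed as a user message
        { role: 'user', content: 'What does the report cover?' },
        { role: 'assistant', content: 'It covers Q3 results.' }
    ]

    const engine = new ContextChatEngine({ chatModel: model, retriever })
    const response = await engine.chat({ message: 'Summarize section two', chatHistory })
    console.log(response.response, response.sourceNodes?.length)
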

@@ -0,0 +1,124 @@
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { LLM, ChatMessage, SimpleChatEngine } from 'llamaindex'

class SimpleChatEngine_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]
    sessionId?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'Simple Chat Engine'
        this.name = 'simpleChatEngine'
        this.version = 1.0
        this.type = 'SimpleChatEngine'
        this.icon = 'chat-engine.png'
        this.category = 'Engine'
        this.description = 'Simple engine to handle back and forth conversations'
        this.baseClasses = [this.type]
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Chat Model',
                name: 'model',
                type: 'BaseChatModel_LlamaIndex'
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseChatMemory'
            },
            {
                label: 'System Message',
                name: 'systemMessagePrompt',
                type: 'string',
                rows: 4,
                optional: true,
                placeholder: 'You are a helpful assistant'
            }
        ]
        this.sessionId = fields?.sessionId
    }

    async init(): Promise<any> {
        return null
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
        const model = nodeData.inputs?.model as LLM
        const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
        const memory = nodeData.inputs?.memory as FlowiseMemory

        const chatHistory = [] as ChatMessage[]

        if (systemMessagePrompt) {
            chatHistory.push({
                content: systemMessagePrompt,
                role: 'user'
            })
        }

        const chatEngine = new SimpleChatEngine({ llm: model })

        const msgs = (await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]
        for (const message of msgs) {
            if (message.type === 'apiMessage') {
                chatHistory.push({
                    content: message.message,
                    role: 'assistant'
                })
            } else if (message.type === 'userMessage') {
                chatHistory.push({
                    content: message.message,
                    role: 'user'
                })
            }
        }

        let text = ''
        let isStreamingStarted = false
        const isStreamingEnabled = options.socketIO && options.socketIOClientId

        if (isStreamingEnabled) {
            const stream = await chatEngine.chat({ message: input, chatHistory, stream: true })
            for await (const chunk of stream) {
                text += chunk.response
                if (!isStreamingStarted) {
                    isStreamingStarted = true
                    options.socketIO.to(options.socketIOClientId).emit('start', chunk.response)
                }

                options.socketIO.to(options.socketIOClientId).emit('token', chunk.response)
            }
        } else {
            const response = await chatEngine.chat({ message: input, chatHistory })
            text = response?.response
        }

        await memory.addChatMessages(
            [
                {
                    text: input,
                    type: 'userMessage'
                },
                {
                    text: text,
                    type: 'apiMessage'
                }
            ],
            this.sessionId
        )

        return text
    }
}

module.exports = { nodeClass: SimpleChatEngine_LlamaIndex }
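
The streaming branch mirrors the one in ContextChatEngine: passing stream: true makes chat() return an async iterable whose chunks carry the incremental text in .response. A trimmed sketch of just that loop:

    import { SimpleChatEngine, ChatMessage } from 'llamaindex'

    declare const llm: any // the connected chat model
    const chatHistory: ChatMessage[] = []

    const engine = new SimpleChatEngine({ llm })
    const stream = await engine.chat({ message: 'Hello', chatHistory, stream: true })
    let text = ''
    for await (const chunk of stream) {
        text += chunk.response // each chunk is an incremental token batch
    }
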

After Width: | Height: | Size: 9.8 KiB
After Width: | Height: | Size: 9.5 KiB

@@ -0,0 +1,12 @@
import { BaseNode, Metadata } from 'llamaindex'

export const reformatSourceDocuments = (sourceNodes: BaseNode<Metadata>[]) => {
    const sourceDocuments = []
    for (const node of sourceNodes) {
        sourceDocuments.push({
            pageContent: (node as any).text,
            metadata: node.metadata
        })
    }
    return sourceDocuments
}
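
This helper converts LlamaIndex source nodes into the { pageContent, metadata } shape the Flowise UI expects for source documents. For example, with two hypothetical nodes:

    const sourceNodes: any[] = [
        { text: 'Chunk one...', metadata: { source: 'a.txt' } },
        { text: 'Chunk two...', metadata: { source: 'b.txt' } }
    ]
    reformatSourceDocuments(sourceNodes)
    // => [{ pageContent: 'Chunk one...', metadata: { source: 'a.txt' } },
    //     { pageContent: 'Chunk two...', metadata: { source: 'b.txt' } }]
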

@@ -0,0 +1,143 @@
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import {
    RetrieverQueryEngine,
    ResponseSynthesizer,
    CompactAndRefine,
    TreeSummarize,
    Refine,
    SimpleResponseBuilder,
    BaseNode,
    Metadata
} from 'llamaindex'
import { reformatSourceDocuments } from '../EngineUtils'

class QueryEngine_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]
    sessionId?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'Query Engine'
        this.name = 'queryEngine'
        this.version = 1.0
        this.type = 'QueryEngine'
        this.icon = 'query-engine.png'
        this.category = 'Engine'
        this.description = 'Simple query engine built to answer questions over your data, without memory'
        this.baseClasses = [this.type]
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Vector Store Retriever',
                name: 'vectorStoreRetriever',
                type: 'VectorIndexRetriever'
            },
            {
                label: 'Response Synthesizer',
                name: 'responseSynthesizer',
                type: 'ResponseSynthesizer',
                description:
                    'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target="_blank" href="https://ts.llamaindex.ai/modules/low_level/response_synthesizer">more</a>',
                optional: true
            },
            {
                label: 'Return Source Documents',
                name: 'returnSourceDocuments',
                type: 'boolean',
                optional: true
            }
        ]
        this.sessionId = fields?.sessionId
    }

    async init(): Promise<any> {
        return null
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
        const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
        const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever
        const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer

        let queryEngine = new RetrieverQueryEngine(vectorStoreRetriever)

        if (responseSynthesizerObj) {
            if (responseSynthesizerObj.type === 'TreeSummarize') {
                const responseSynthesizer = new ResponseSynthesizer({
                    responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext, responseSynthesizerObj.textQAPromptTemplate),
                    serviceContext: vectorStoreRetriever.serviceContext
                })
                queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
            } else if (responseSynthesizerObj.type === 'CompactAndRefine') {
                const responseSynthesizer = new ResponseSynthesizer({
                    responseBuilder: new CompactAndRefine(
                        vectorStoreRetriever.serviceContext,
                        responseSynthesizerObj.textQAPromptTemplate,
                        responseSynthesizerObj.refinePromptTemplate
                    ),
                    serviceContext: vectorStoreRetriever.serviceContext
                })
                queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
            } else if (responseSynthesizerObj.type === 'Refine') {
                const responseSynthesizer = new ResponseSynthesizer({
                    responseBuilder: new Refine(
                        vectorStoreRetriever.serviceContext,
                        responseSynthesizerObj.textQAPromptTemplate,
                        responseSynthesizerObj.refinePromptTemplate
                    ),
                    serviceContext: vectorStoreRetriever.serviceContext
                })
                queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
            } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') {
                const responseSynthesizer = new ResponseSynthesizer({
                    responseBuilder: new SimpleResponseBuilder(vectorStoreRetriever.serviceContext),
                    serviceContext: vectorStoreRetriever.serviceContext
                })
                queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
            }
        }

        let text = ''
        let sourceDocuments: ICommonObject[] = []
        let sourceNodes: BaseNode<Metadata>[] = []
        let isStreamingStarted = false
        const isStreamingEnabled = options.socketIO && options.socketIOClientId

        if (isStreamingEnabled) {
            const stream = await queryEngine.query({ query: input, stream: true })
            for await (const chunk of stream) {
                text += chunk.response
                if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes
                if (!isStreamingStarted) {
                    isStreamingStarted = true
                    options.socketIO.to(options.socketIOClientId).emit('start', chunk.response)
                }

                options.socketIO.to(options.socketIOClientId).emit('token', chunk.response)
            }

            if (returnSourceDocuments) {
                sourceDocuments = reformatSourceDocuments(sourceNodes)
                options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments)
            }
        } else {
            const response = await queryEngine.query({ query: input })
            text = response?.response
            sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? [])
        }

        if (returnSourceDocuments) return { text, sourceDocuments }
        else return { text }
    }
}

module.exports = { nodeClass: QueryEngine_LlamaIndex }
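
Each branch of run() pairs the retriever's serviceContext with a different response builder. Pulled out on its own, the TreeSummarize case reduces to roughly this sketch, with the connected retriever treated as given:

    import { ResponseSynthesizer, TreeSummarize, RetrieverQueryEngine } from 'llamaindex'

    declare const vectorStoreRetriever: any // VectorIndexRetriever from the connected node

    const responseSynthesizer = new ResponseSynthesizer({
        responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext),
        serviceContext: vectorStoreRetriever.serviceContext
    })
    const queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
    const res = await queryEngine.query({ query: 'What is the document about?' }) // res.response holds the text
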

After Width: | Height: | Size: 12 KiB

@@ -0,0 +1,193 @@
import { flatten } from 'lodash'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import {
    TreeSummarize,
    SimpleResponseBuilder,
    Refine,
    BaseEmbedding,
    ResponseSynthesizer,
    CompactAndRefine,
    QueryEngineTool,
    LLMQuestionGenerator,
    SubQuestionQueryEngine,
    BaseNode,
    Metadata,
    serviceContextFromDefaults
} from 'llamaindex'
import { reformatSourceDocuments } from '../EngineUtils'

class SubQuestionQueryEngine_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]
    sessionId?: string

    constructor(fields?: { sessionId?: string }) {
        this.label = 'Sub Question Query Engine'
        this.name = 'subQuestionQueryEngine'
        this.version = 1.0
        this.type = 'SubQuestionQueryEngine'
        this.icon = 'subQueryEngine.svg'
        this.category = 'Engine'
        this.description =
            'Breaks a complex query into sub questions for each relevant data source, then gathers all the intermediate responses and synthesizes a final response'
        this.baseClasses = [this.type]
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'QueryEngine Tools',
                name: 'queryEngineTools',
                type: 'QueryEngineTool',
                list: true
            },
            {
                label: 'Chat Model',
                name: 'model',
                type: 'BaseChatModel_LlamaIndex'
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'BaseEmbedding_LlamaIndex'
            },
            {
                label: 'Response Synthesizer',
                name: 'responseSynthesizer',
                type: 'ResponseSynthesizer',
                description:
                    'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target="_blank" href="https://ts.llamaindex.ai/modules/low_level/response_synthesizer">more</a>',
                optional: true
            },
            {
                label: 'Return Source Documents',
                name: 'returnSourceDocuments',
                type: 'boolean',
                optional: true
            }
        ]
        this.sessionId = fields?.sessionId
    }

    async init(): Promise<any> {
        return null
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
        const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
        const embeddings = nodeData.inputs?.embeddings as BaseEmbedding
        const model = nodeData.inputs?.model

        const serviceContext = serviceContextFromDefaults({
            llm: model,
            embedModel: embeddings
        })

        let queryEngineTools = nodeData.inputs?.queryEngineTools as QueryEngineTool[]
        queryEngineTools = flatten(queryEngineTools)

        let queryEngine = SubQuestionQueryEngine.fromDefaults({
            serviceContext,
            queryEngineTools,
            questionGen: new LLMQuestionGenerator({ llm: model })
        })

        const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer
        if (responseSynthesizerObj) {
            if (responseSynthesizerObj.type === 'TreeSummarize') {
                const responseSynthesizer = new ResponseSynthesizer({
                    responseBuilder: new TreeSummarize(serviceContext, responseSynthesizerObj.textQAPromptTemplate),
                    serviceContext
                })
                queryEngine = SubQuestionQueryEngine.fromDefaults({
                    responseSynthesizer,
                    serviceContext,
                    queryEngineTools,
                    questionGen: new LLMQuestionGenerator({ llm: model })
                })
            } else if (responseSynthesizerObj.type === 'CompactAndRefine') {
                const responseSynthesizer = new ResponseSynthesizer({
                    responseBuilder: new CompactAndRefine(
                        serviceContext,
                        responseSynthesizerObj.textQAPromptTemplate,
                        responseSynthesizerObj.refinePromptTemplate
                    ),
                    serviceContext
                })
                queryEngine = SubQuestionQueryEngine.fromDefaults({
                    responseSynthesizer,
                    serviceContext,
                    queryEngineTools,
                    questionGen: new LLMQuestionGenerator({ llm: model })
                })
            } else if (responseSynthesizerObj.type === 'Refine') {
                const responseSynthesizer = new ResponseSynthesizer({
                    responseBuilder: new Refine(
                        serviceContext,
                        responseSynthesizerObj.textQAPromptTemplate,
                        responseSynthesizerObj.refinePromptTemplate
                    ),
                    serviceContext
                })
                queryEngine = SubQuestionQueryEngine.fromDefaults({
                    responseSynthesizer,
                    serviceContext,
                    queryEngineTools,
                    questionGen: new LLMQuestionGenerator({ llm: model })
                })
            } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') {
                const responseSynthesizer = new ResponseSynthesizer({
                    responseBuilder: new SimpleResponseBuilder(serviceContext),
                    serviceContext
                })
                queryEngine = SubQuestionQueryEngine.fromDefaults({
                    responseSynthesizer,
                    serviceContext,
                    queryEngineTools,
                    questionGen: new LLMQuestionGenerator({ llm: model })
                })
            }
        }

        let text = ''
        let sourceDocuments: ICommonObject[] = []
        let sourceNodes: BaseNode<Metadata>[] = []
        let isStreamingStarted = false
        const isStreamingEnabled = options.socketIO && options.socketIOClientId

        if (isStreamingEnabled) {
            const stream = await queryEngine.query({ query: input, stream: true })
            for await (const chunk of stream) {
                text += chunk.response
                if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes
                if (!isStreamingStarted) {
                    isStreamingStarted = true
                    options.socketIO.to(options.socketIOClientId).emit('start', chunk.response)
                }

                options.socketIO.to(options.socketIOClientId).emit('token', chunk.response)
            }

            if (returnSourceDocuments) {
                sourceDocuments = reformatSourceDocuments(sourceNodes)
                options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments)
            }
        } else {
            const response = await queryEngine.query({ query: input })
            text = response?.response
            sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? [])
        }

        if (returnSourceDocuments) return { text, sourceDocuments }
        else return { text }
    }
}

module.exports = { nodeClass: SubQuestionQueryEngine_LlamaIndex }
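
Stripped of the response synthesizer branches, the core wiring is fromDefaults plus an LLMQuestionGenerator driven by the same model. A condensed sketch with the connected nodes treated as given:

    import { SubQuestionQueryEngine, LLMQuestionGenerator, serviceContextFromDefaults } from 'llamaindex'

    declare const model: any
    declare const embeddings: any
    declare const queryEngineTools: any[] // one tool per data source

    const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
    const engine = SubQuestionQueryEngine.fromDefaults({
        serviceContext,
        queryEngineTools,
        questionGen: new LLMQuestionGenerator({ llm: model })
    })
    const answer = await engine.query({ query: 'Compare the two reports' })
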

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-filter-question" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M15 19l-6 2v-8.5l-4.48 -4.928a2 2 0 0 1 -.52 -1.345v-2.227h16v2.172a2 2 0 0 1 -.586 1.414l-4.414 4.414" /><path d="M19 22v.01" /><path d="M19 19a2.003 2.003 0 0 0 .914 -3.782a1.98 1.98 0 0 0 -2.414 .483" /></svg>

After Width: | Height: | Size: 506 B

@@ -0,0 +1,75 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'

class CompactRefine_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Compact and Refine'
        this.name = 'compactrefineLlamaIndex'
        this.version = 1.0
        this.type = 'CompactRefine'
        this.icon = 'compactrefine.svg'
        this.category = 'Response Synthesizer'
        this.description =
            'CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.'
        this.baseClasses = [this.type, 'ResponseSynthesizer']
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Refine Prompt',
                name: 'refinePrompt',
                type: 'string',
                rows: 4,
                default: `The original query is as follows: {query}
We have provided an existing answer: {existingAnswer}
We have the opportunity to refine the existing answer (only if needed) with some more context below.
------------
{context}
------------
Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.
Refined Answer:`,
                warning: `Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`,
                optional: true
            },
            {
                label: 'Text QA Prompt',
                name: 'textQAPrompt',
                type: 'string',
                rows: 4,
                default: `Context information is below.
---------------------
{context}
---------------------
Given the context information and not prior knowledge, answer the query.
Query: {query}
Answer:`,
                warning: `Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}`,
                optional: true
            }
        ]
    }

    async init(nodeData: INodeData): Promise<any> {
        const refinePrompt = nodeData.inputs?.refinePrompt as string
        const textQAPrompt = nodeData.inputs?.textQAPrompt as string

        const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) =>
            refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query)
        const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query)

        return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'CompactAndRefine' })
    }
}

module.exports = { nodeClass: CompactRefine_LlamaIndex }
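
Note how the templates work: LlamaIndex calls them with named fields, and the node substitutes by plain String.replace, which only swaps the first occurrence of each placeholder. A tiny example with a shortened prompt:

    const textQAPrompt = 'Context information is below.\n{context}\nQuery: {query}\nAnswer:'
    const textQAPromptTemplate = ({ context = '', query = '' }) =>
        textQAPrompt.replace('{context}', context).replace('{query}', query)

    textQAPromptTemplate({ context: 'Paris is the capital of France.', query: 'Capital of France?' })
    // 'Context information is below.\nParis is the capital of France.\nQuery: Capital of France?\nAnswer:'
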

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layers-difference" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M16 16v2a2 2 0 0 1 -2 2h-8a2 2 0 0 1 -2 -2v-8a2 2 0 0 1 2 -2h2v-2a2 2 0 0 1 2 -2h8a2 2 0 0 1 2 2v8a2 2 0 0 1 -2 2h-2" /><path d="M10 8l-2 0l0 2" /><path d="M8 14l0 2l2 0" /><path d="M14 8l2 0l0 2" /><path d="M16 14l0 2l-2 0" /></svg>

After Width: | Height: | Size: 529 B

@@ -0,0 +1,75 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'

class Refine_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Refine'
        this.name = 'refineLlamaIndex'
        this.version = 1.0
        this.type = 'Refine'
        this.icon = 'refine.svg'
        this.category = 'Response Synthesizer'
        this.description =
            'Create and refine an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.'
        this.baseClasses = [this.type, 'ResponseSynthesizer']
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Refine Prompt',
                name: 'refinePrompt',
                type: 'string',
                rows: 4,
                default: `The original query is as follows: {query}
We have provided an existing answer: {existingAnswer}
We have the opportunity to refine the existing answer (only if needed) with some more context below.
------------
{context}
------------
Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.
Refined Answer:`,
                warning: `Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`,
                optional: true
            },
            {
                label: 'Text QA Prompt',
                name: 'textQAPrompt',
                type: 'string',
                rows: 4,
                default: `Context information is below.
---------------------
{context}
---------------------
Given the context information and not prior knowledge, answer the query.
Query: {query}
Answer:`,
                warning: `Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}`,
                optional: true
            }
        ]
    }

    async init(nodeData: INodeData): Promise<any> {
        const refinePrompt = nodeData.inputs?.refinePrompt as string
        const textQAPrompt = nodeData.inputs?.textQAPrompt as string

        const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) =>
            refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query)
        const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query)

        return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'Refine' })
    }
}

module.exports = { nodeClass: Refine_LlamaIndex }

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-filter-search" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M11.36 20.213l-2.36 .787v-8.5l-4.48 -4.928a2 2 0 0 1 -.52 -1.345v-2.227h16v2.172a2 2 0 0 1 -.586 1.414l-4.414 4.414" /><path d="M18 18m-3 0a3 3 0 1 0 6 0a3 3 0 1 0 -6 0" /><path d="M20.2 20.2l1.8 1.8" /></svg>

After Width: | Height: | Size: 501 B

@@ -0,0 +1,35 @@
import { INode, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'

class SimpleResponseBuilder_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Simple Response Builder'
        this.name = 'simpleResponseBuilderLlamaIndex'
        this.version = 1.0
        this.type = 'SimpleResponseBuilder'
        this.icon = 'simplerb.svg'
        this.category = 'Response Synthesizer'
        this.description = `Apply a query to a collection of text chunks, gathering the responses in an array, and return a combined string of all responses. Useful for individual queries on each text chunk.`
        this.baseClasses = [this.type, 'ResponseSynthesizer']
        this.tags = ['LlamaIndex']
        this.inputs = []
    }

    async init(): Promise<any> {
        return new ResponseSynthesizerClass({ type: 'SimpleResponseBuilder' })
    }
}

module.exports = { nodeClass: SimpleResponseBuilder_LlamaIndex }

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-quote" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M10 11h-4a1 1 0 0 1 -1 -1v-3a1 1 0 0 1 1 -1h3a1 1 0 0 1 1 1v6c0 2.667 -1.333 4.333 -4 5" /><path d="M19 11h-4a1 1 0 0 1 -1 -1v-3a1 1 0 0 1 1 -1h3a1 1 0 0 1 1 1v6c0 2.667 -1.333 4.333 -4 5" /></svg>

After Width: | Height: | Size: 481 B

@@ -0,0 +1,56 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'

class TreeSummarize_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'TreeSummarize'
        this.name = 'treeSummarizeLlamaIndex'
        this.version = 1.0
        this.type = 'TreeSummarize'
        this.icon = 'treesummarize.svg'
        this.category = 'Response Synthesizer'
        this.description =
            'Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.'
        this.baseClasses = [this.type, 'ResponseSynthesizer']
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Prompt',
                name: 'prompt',
                type: 'string',
                rows: 4,
                default: `Context information from multiple sources is below.
---------------------
{context}
---------------------
Given the information from multiple sources and not prior knowledge, answer the query.
Query: {query}
Answer:`,
                warning: `Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}`,
                optional: true
            }
        ]
    }

    async init(nodeData: INodeData): Promise<any> {
        const prompt = nodeData.inputs?.prompt as string

        const textQAPromptTemplate = ({ context = '', query = '' }) => prompt.replace('{context}', context).replace('{query}', query)

        return new ResponseSynthesizerClass({ textQAPromptTemplate, type: 'TreeSummarize' })
    }
}

module.exports = { nodeClass: TreeSummarize_LlamaIndex }

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-tree" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M12 13l-2 -2" /><path d="M12 12l2 -2" /><path d="M12 21v-13" /><path d="M9.824 16a3 3 0 0 1 -2.743 -3.69a3 3 0 0 1 .304 -4.833a3 3 0 0 1 4.615 -3.707a3 3 0 0 1 4.614 3.707a3 3 0 0 1 .305 4.833a3 3 0 0 1 -2.919 3.695h-4z" /></svg>

After Width: | Height: | Size: 512 B

@@ -0,0 +1,11 @@
export class ResponseSynthesizerClass {
    type: string
    textQAPromptTemplate?: any
    refinePromptTemplate?: any

    constructor(params: { type: string; textQAPromptTemplate?: any; refinePromptTemplate?: any }) {
        this.type = params.type
        this.textQAPromptTemplate = params.textQAPromptTemplate
        this.refinePromptTemplate = params.refinePromptTemplate
    }
}
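
The class is intentionally inert: it only carries the type discriminator and the two template functions, and the engine nodes above switch on type to rebuild the real LlamaIndex synthesizer. For instance, with a hypothetical shortened template:

    const synth = new ResponseSynthesizerClass({
        type: 'TreeSummarize',
        textQAPromptTemplate: ({ context = '', query = '' }) => `Summarize: ${context}\nQ: ${query}`
    })
    // QueryEngine_LlamaIndex.run() reads synth.type ('TreeSummarize') to pick the matching responseBuilder
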

@@ -48,7 +48,7 @@ export class CohereRerank extends BaseDocumentCompressor {
                doc.metadata.relevance_score = result.relevance_score
                finalResults.push(doc)
            })
-            return finalResults
+            return finalResults.splice(0, this.k)
        } catch (error) {
            return documents
        }
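
The fix caps the reranked output at the compressor's k. Array.prototype.splice(0, k) returns the first k elements; it also mutates the array it is called on, which is harmless here because finalResults goes out of scope immediately:

    const finalResults = ['d1', 'd2', 'd3', 'd4']
    const k = 2
    const topK = finalResults.splice(0, k) // ['d1', 'd2']; finalResults is now ['d3', 'd4']
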
|
||||
|
|
|
|||
|
|
@ -0,0 +1,68 @@
|
|||
import { INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { VectorStoreIndex } from 'llamaindex'
|
||||
|
||||
class QueryEngine_Tools implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
tags: string[]
|
||||
baseClasses: string[]
|
||||
inputs?: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'QueryEngine Tool'
|
||||
this.name = 'queryEngineToolLlamaIndex'
|
||||
this.version = 1.0
|
||||
this.type = 'QueryEngineTool'
|
||||
this.icon = 'queryEngineTool.svg'
|
||||
this.category = 'Tools'
|
||||
this.tags = ['LlamaIndex']
|
||||
this.description = 'Tool used to invoke query engine'
|
||||
this.baseClasses = [this.type]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Vector Store Index',
|
||||
name: 'vectorStoreIndex',
|
||||
type: 'VectorStoreIndex'
|
||||
},
|
||||
{
|
||||
label: 'Tool Name',
|
||||
name: 'toolName',
|
||||
type: 'string',
|
||||
description: 'Tool name must be small capital letter with underscore. Ex: my_tool'
|
||||
},
|
||||
{
|
||||
label: 'Tool Description',
|
||||
name: 'toolDesc',
|
||||
type: 'string',
|
||||
rows: 4
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData): Promise<any> {
|
||||
const vectorStoreIndex = nodeData.inputs?.vectorStoreIndex as VectorStoreIndex
|
||||
const toolName = nodeData.inputs?.toolName as string
|
||||
const toolDesc = nodeData.inputs?.toolDesc as string
|
||||
const queryEngineTool = {
|
||||
queryEngine: vectorStoreIndex.asQueryEngine({
|
||||
preFilters: {
|
||||
...(vectorStoreIndex as any).metadatafilter
|
||||
}
|
||||
}),
|
||||
metadata: {
|
||||
name: toolName,
|
||||
description: toolDesc
|
||||
},
|
||||
vectorStoreIndex
|
||||
}
|
||||
|
||||
return queryEngineTool
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: QueryEngine_Tools }
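
A hypothetical consumer-side sketch of the object returned by init(); the exact query(...) signature varies across llamaindex versions, so treat this as an assumption rather than the tool's documented API:

// The tool pairs a LlamaIndex query engine with agent-facing metadata.
const tool = await new QueryEngine_Tools().init(nodeData)
const response = await tool.queryEngine.query('What does the report conclude?')
console.log(tool.metadata.name, response.toString())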

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-brand-google-big-query" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M17.73 19.875a2.225 2.225 0 0 1 -1.948 1.125h-7.283a2.222 2.222 0 0 1 -1.947 -1.158l-4.272 -6.75a2.269 2.269 0 0 1 0 -2.184l4.272 -6.75a2.225 2.225 0 0 1 1.946 -1.158h7.285c.809 0 1.554 .443 1.947 1.158l3.98 6.75a2.33 2.33 0 0 1 0 2.25l-3.98 6.75v-.033z" /><path d="M11.5 11.5m-3.5 0a3.5 3.5 0 1 0 7 0a3.5 3.5 0 1 0 -7 0" /><path d="M14 14l2 2" /></svg>

@@ -0,0 +1,383 @@
import {
    BaseNode,
    Document,
    Metadata,
    VectorStore,
    VectorStoreQuery,
    VectorStoreQueryResult,
    serviceContextFromDefaults,
    storageContextFromDefaults,
    VectorStoreIndex,
    BaseEmbedding
} from 'llamaindex'
import { FetchResponse, Index, Pinecone, ScoredPineconeRecord } from '@pinecone-database/pinecone'
import { flatten } from 'lodash'
import { Document as LCDocument } from 'langchain/document'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { flattenObject, getCredentialData, getCredentialParam } from '../../../src/utils'

class PineconeLlamaIndex_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    tags: string[]
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Pinecone'
        this.name = 'pineconeLlamaIndex'
        this.version = 1.0
        this.type = 'Pinecone'
        this.icon = 'pinecone.svg'
        this.category = 'Vector Stores'
        this.description = `Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database`
        this.baseClasses = [this.type, 'VectorIndexRetriever']
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['pineconeApi']
        }
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true,
                optional: true
            },
            {
                label: 'Chat Model',
                name: 'model',
                type: 'BaseChatModel_LlamaIndex'
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'BaseEmbedding_LlamaIndex'
            },
            {
                label: 'Pinecone Index',
                name: 'pineconeIndex',
                type: 'string'
            },
            {
                label: 'Pinecone Namespace',
                name: 'pineconeNamespace',
                type: 'string',
                placeholder: 'my-first-namespace',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Pinecone Metadata Filter',
                name: 'pineconeMetadataFilter',
                type: 'json',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Defaults to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Pinecone Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Pinecone Vector Store Index',
                name: 'vectorStore',
                baseClasses: [this.type, 'VectorStoreIndex']
            }
        ]
    }

    //@ts-ignore
    vectorStoreMethods = {
        async upsert(nodeData: INodeData, options: ICommonObject): Promise<void> {
            const indexName = nodeData.inputs?.pineconeIndex as string
            const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string
            const docs = nodeData.inputs?.document as LCDocument[]
            const embeddings = nodeData.inputs?.embeddings as BaseEmbedding
            const model = nodeData.inputs?.model

            const credentialData = await getCredentialData(nodeData.credential ?? '', options)
            const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)

            const pcvs = new PineconeVectorStore({
                indexName,
                apiKey: pineconeApiKey,
                namespace: pineconeNamespace
            })

            const flattenDocs = docs && docs.length ? flatten(docs) : []
            const finalDocs = []
            for (let i = 0; i < flattenDocs.length; i += 1) {
                if (flattenDocs[i] && flattenDocs[i].pageContent) {
                    finalDocs.push(new LCDocument(flattenDocs[i]))
                }
            }

            const llamadocs: Document[] = []
            for (const doc of finalDocs) {
                llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata }))
            }

            const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
            const storageContext = await storageContextFromDefaults({ vectorStore: pcvs })

            try {
                await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext })
            } catch (e) {
                throw new Error(e)
            }
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const indexName = nodeData.inputs?.pineconeIndex as string
        const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string
        const pineconeMetadataFilter = nodeData.inputs?.pineconeMetadataFilter
        const embeddings = nodeData.inputs?.embeddings as BaseEmbedding
        const model = nodeData.inputs?.model
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)

        const obj: PineconeParams = {
            indexName,
            apiKey: pineconeApiKey
        }

        if (pineconeNamespace) obj.namespace = pineconeNamespace

        let metadatafilter = {}
        if (pineconeMetadataFilter) {
            metadatafilter = typeof pineconeMetadataFilter === 'object' ? pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter)
            obj.queryFilter = metadatafilter
        }

        const pcvs = new PineconeVectorStore(obj)

        const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
        const storageContext = await storageContextFromDefaults({ vectorStore: pcvs })

        const index = await VectorStoreIndex.init({
            nodes: [],
            storageContext,
            serviceContext
        })

        const output = nodeData.outputs?.output as string

        if (output === 'retriever') {
            const retriever = index.asRetriever()
            retriever.similarityTopK = k
            ;(retriever as any).serviceContext = serviceContext
            return retriever
        } else if (output === 'vectorStore') {
            ;(index as any).k = k
            if (metadatafilter) {
                ;(index as any).metadatafilter = metadatafilter
            }
            return index
        }
        return index
    }
}

type PineconeParams = {
    indexName: string
    apiKey: string
    namespace?: string
    chunkSize?: number
    queryFilter?: object
}

class PineconeVectorStore implements VectorStore {
    storesText: boolean = true
    db?: Pinecone
    indexName: string
    apiKey: string
    chunkSize: number
    namespace?: string
    queryFilter?: object

    constructor(params: PineconeParams) {
        this.indexName = params?.indexName
        this.apiKey = params?.apiKey
        this.namespace = params?.namespace ?? ''
        this.chunkSize = params?.chunkSize ?? Number.parseInt(process.env.PINECONE_CHUNK_SIZE ?? '100')
        this.queryFilter = params?.queryFilter ?? {}
    }

    private async getDb(): Promise<Pinecone> {
        if (!this.db) {
            this.db = new Pinecone({
                apiKey: this.apiKey
            })
        }
        return Promise.resolve(this.db)
    }

    client() {
        return this.getDb()
    }

    async index() {
        const db: Pinecone = await this.getDb()
        return db.Index(this.indexName)
    }

    async clearIndex() {
        const db: Pinecone = await this.getDb()
        return await db.index(this.indexName).deleteAll()
    }

    async add(embeddingResults: BaseNode<Metadata>[]): Promise<string[]> {
        if (embeddingResults.length == 0) {
            return Promise.resolve([])
        }

        const idx: Index = await this.index()
        const nodes = embeddingResults.map(this.nodeToRecord)

        for (let i = 0; i < nodes.length; i += this.chunkSize) {
            const chunk = nodes.slice(i, i + this.chunkSize)
            const result = await this.saveChunk(idx, chunk)
            if (!result) {
                return Promise.reject()
            }
        }
        return Promise.resolve([])
    }

    protected async saveChunk(idx: Index, chunk: any) {
        try {
            const namespace = idx.namespace(this.namespace ?? '')
            await namespace.upsert(chunk)
            return true
        } catch (err) {
            return false
        }
    }

    async delete(refDocId: string): Promise<void> {
        const idx = await this.index()
        const namespace = idx.namespace(this.namespace ?? '')
        return namespace.deleteOne(refDocId)
    }

    async query(query: VectorStoreQuery): Promise<VectorStoreQueryResult> {
        const queryOptions: any = {
            vector: query.queryEmbedding,
            topK: query.similarityTopK,
            filter: this.queryFilter
        }

        const idx = await this.index()
        const namespace = idx.namespace(this.namespace ?? '')
        const results = await namespace.query(queryOptions)

        const idList = results.matches.map((row) => row.id)
        const records: FetchResponse<any> = await namespace.fetch(idList)
        const rows = Object.values(records.records)

        const nodes = rows.map((row) => {
            return new Document({
                id_: row.id,
                text: this.textFromResultRow(row),
                metadata: this.metaWithoutText(row.metadata),
                embedding: row.values
            })
        })

        const result = {
            nodes: nodes,
            similarities: results.matches.map((row) => row.score || 999),
            ids: results.matches.map((row) => row.id)
        }

        return Promise.resolve(result)
    }

    /**
     * Required by VectorStore interface. Currently ignored.
     */
    persist(): Promise<void> {
        return Promise.resolve()
    }

    textFromResultRow(row: ScoredPineconeRecord<Metadata>): string {
        return row.metadata?.text ?? ''
    }

    metaWithoutText(meta: Metadata): any {
        return Object.keys(meta)
            .filter((key) => key != 'text')
            .reduce((acc: any, key: string) => {
                acc[key] = meta[key]
                return acc
            }, {})
    }

    nodeToRecord(node: BaseNode<Metadata>) {
        let id: any = node.id_.length ? node.id_ : null
        return {
            id: id,
            values: node.getEmbedding(),
            metadata: {
                ...cleanupMetadata(node.metadata),
                text: (node as any).text
            }
        }
    }
}

const cleanupMetadata = (nodeMetadata: ICommonObject) => {
    // Pinecone doesn't support nested objects, so we flatten them
    const documentMetadata: any = { ...nodeMetadata }
    // preserve string arrays which are allowed
    const stringArrays: Record<string, string[]> = {}
    for (const key of Object.keys(documentMetadata)) {
        if (Array.isArray(documentMetadata[key]) && documentMetadata[key].every((el: any) => typeof el === 'string')) {
            stringArrays[key] = documentMetadata[key]
            delete documentMetadata[key]
        }
    }
    const metadata: {
        [key: string]: string | number | boolean | string[] | null
    } = {
        ...flattenObject(documentMetadata),
        ...stringArrays
    }
    // Pinecone doesn't support null values, so we remove them
    for (const key of Object.keys(metadata)) {
        if (metadata[key] == null) {
            delete metadata[key]
        } else if (typeof metadata[key] === 'object' && Object.keys(metadata[key] as unknown as object).length === 0) {
            delete metadata[key]
        }
    }
    return metadata
}

module.exports = { nodeClass: PineconeLlamaIndex_VectorStores }
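
A worked example of cleanupMetadata as defined above (input values are illustrative):

// Nested objects are flattened to dot-separated keys, all-string arrays
// are preserved untouched, and empty objects are dropped entirely.
const raw = { source: 'report.pdf', page: { start: 1, end: 3 }, authors: ['alice', 'bob'] }
// cleanupMetadata(raw) yields:
// { source: 'report.pdf', 'page.start': 1, 'page.end': 3, authors: ['alice', 'bob'] }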

@@ -0,0 +1,145 @@
import path from 'path'
import { flatten } from 'lodash'
import { storageContextFromDefaults, serviceContextFromDefaults, VectorStoreIndex, Document } from 'llamaindex'
import { Document as LCDocument } from 'langchain/document'
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getUserHome } from '../../../src'

class SimpleStoreUpsert_LlamaIndex_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'SimpleStore'
        this.name = 'simpleStoreLlamaIndex'
        this.version = 1.0
        this.type = 'SimpleVectorStore'
        this.icon = 'simplevs.svg'
        this.category = 'Vector Stores'
        this.description = 'Upsert embedded data to local path and perform similarity search'
        this.baseClasses = [this.type, 'VectorIndexRetriever']
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true,
                optional: true
            },
            {
                label: 'Chat Model',
                name: 'model',
                type: 'BaseChatModel_LlamaIndex'
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'BaseEmbedding_LlamaIndex'
            },
            {
                label: 'Base Path to store',
                name: 'basePath',
                description:
                    'Path where the embedding indexes are persisted. If not specified, defaults to the same path where the database is stored',
                type: 'string',
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Defaults to 4',
                placeholder: '4',
                type: 'number',
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'SimpleStore Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'SimpleStore Vector Store Index',
                name: 'vectorStore',
                baseClasses: [this.type, 'VectorStoreIndex']
            }
        ]
    }

    //@ts-ignore
    vectorStoreMethods = {
        async upsert(nodeData: INodeData): Promise<void> {
            const basePath = nodeData.inputs?.basePath as string
            const docs = nodeData.inputs?.document as LCDocument[]
            const embeddings = nodeData.inputs?.embeddings
            const model = nodeData.inputs?.model

            let filePath = ''
            if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex')
            else filePath = basePath

            const flattenDocs = docs && docs.length ? flatten(docs) : []
            const finalDocs = []
            for (let i = 0; i < flattenDocs.length; i += 1) {
                finalDocs.push(new LCDocument(flattenDocs[i]))
            }

            const llamadocs: Document[] = []
            for (const doc of finalDocs) {
                llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata }))
            }

            const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
            const storageContext = await storageContextFromDefaults({ persistDir: filePath })

            try {
                await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext })
            } catch (e) {
                throw new Error(e)
            }
        }
    }

    async init(nodeData: INodeData): Promise<any> {
        const basePath = nodeData.inputs?.basePath as string
        const embeddings = nodeData.inputs?.embeddings
        const model = nodeData.inputs?.model
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4

        let filePath = ''
        if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex')
        else filePath = basePath

        const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
        const storageContext = await storageContextFromDefaults({ persistDir: filePath })

        const index = await VectorStoreIndex.init({ storageContext, serviceContext })

        const output = nodeData.outputs?.output as string

        if (output === 'retriever') {
            const retriever = index.asRetriever()
            retriever.similarityTopK = k
            ;(retriever as any).serviceContext = serviceContext
            return retriever
        } else if (output === 'vectorStore') {
            ;(index as any).k = k
            return index
        }
        return index
    }
}

module.exports = { nodeClass: SimpleStoreUpsert_LlamaIndex_VectorStores }
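
For reference, when no Base Path is supplied the persist directory resolves under the user's home folder via the getUserHome helper imported above (example path is illustrative):

// e.g. with HOME=/home/alice this resolves to /home/alice/.flowise/llamaindex
const persistDir = basePath || path.join(getUserHome(), '.flowise', 'llamaindex')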

@@ -0,0 +1,6 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-database" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
<path stroke="none" d="M0 0h24v24H0z" fill="none"></path>
<path d="M12 6m-8 0a8 3 0 1 0 16 0a8 3 0 1 0 -16 0"></path>
<path d="M4 6v6a8 3 0 0 0 16 0v-6"></path>
<path d="M4 12v6a8 3 0 0 0 16 0v-6"></path>
</svg>

@@ -1,6 +1,6 @@
{
    "name": "flowise-components",
    "version": "1.5.2",
    "version": "1.6.0",
    "description": "Flowiseai Components",
    "main": "dist/src/index",
    "types": "dist/src/index.d.ts",

@@ -63,9 +63,10 @@
    "jsonpointer": "^5.0.1",
    "langchain": "^0.1.7",
    "langfuse": "2.0.2",
    "langfuse-langchain": "2.3.3",
    "langfuse-langchain": "2.6.0",
    "langsmith": "0.0.63",
    "linkifyjs": "^4.1.1",
    "llamaindex": "^0.0.48",
    "lunary": "^0.6.16",
    "mammoth": "^1.5.1",
    "moment": "^2.29.3",

@@ -98,6 +98,7 @@ export interface INodeProperties {
    version: number
    category: string // TODO: use enum instead of string
    baseClasses: string[]
    tags?: string[]
    description?: string
    filePath?: string
    badge?: string

@@ -662,6 +662,28 @@ export const convertSchemaToZod = (schema: string | object): ICommonObject => {
    }
}

/**
 * Flatten nested object
 * @param {ICommonObject} obj
 * @param {string} parentKey
 * @returns {ICommonObject}
 */
export const flattenObject = (obj: ICommonObject, parentKey?: string) => {
    let result: any = {}

    Object.keys(obj).forEach((key) => {
        const value = obj[key]
        const _key = parentKey ? parentKey + '.' + key : key
        if (typeof value === 'object') {
            result = { ...result, ...flattenObject(value, _key) }
        } else {
            result[_key] = value
        }
    })

    return result
}
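
A quick worked example of the new helper. One caveat worth flagging: typeof null === 'object', so a null-valued key would recurse into Object.keys(null) and throw; callers appear to assume the metadata contains no nulls by the time this runs.

// flattenObject({ a: 1, b: { c: 2, d: { e: 3 } } })
// -> { a: 1, 'b.c': 2, 'b.d.e': 3 }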

/**
 * Convert BaseMessage to IMessage
 * @param {BaseMessage[]} messages

@@ -0,0 +1,917 @@
{
    "description": "Answer questions based on retrieved documents (context) with built-in memory to remember conversation using LlamaIndex",
    "badge": "NEW",
    "nodes": [
        {
            "width": 300,
            "height": 438,
            "id": "textFile_0",
            "position": {
                "x": 221.215421786192,
                "y": 94.91489477412404
            },
            "type": "customNode",
            "data": {
                "id": "textFile_0",
                "label": "Text File",
                "version": 3,
                "name": "textFile",
                "type": "Document",
                "baseClasses": ["Document"],
                "category": "Document Loaders",
                "description": "Load data from text files",
                "inputParams": [
                    {
                        "label": "Txt File",
                        "name": "txtFile",
                        "type": "file",
                        "fileType": ".txt, .html, .aspx, .asp, .cpp, .c, .cs, .css, .go, .h, .java, .js, .less, .ts, .php, .proto, .python, .py, .rst, .ruby, .rb, .rs, .scala, .sc, .scss, .sol, .sql, .swift, .markdown, .md, .tex, .ltx, .vb, .xml",
                        "id": "textFile_0-input-txtFile-file"
                    },
                    {
                        "label": "Metadata",
                        "name": "metadata",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "textFile_0-input-metadata-json"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Text Splitter",
                        "name": "textSplitter",
                        "type": "TextSplitter",
                        "optional": true,
                        "id": "textFile_0-input-textSplitter-TextSplitter"
                    }
                ],
                "inputs": {
                    "textSplitter": "{{recursiveCharacterTextSplitter_0.data.instance}}",
                    "metadata": ""
                },
                "outputAnchors": [
                    {
                        "name": "output",
                        "label": "Output",
                        "type": "options",
                        "options": [
                            {
                                "id": "textFile_0-output-document-Document",
                                "name": "document",
                                "label": "Document",
                                "type": "Document"
                            },
                            {
                                "id": "textFile_0-output-text-string|json",
                                "name": "text",
                                "label": "Text",
                                "type": "string | json"
                            }
                        ],
                        "default": "document"
                    }
                ],
                "outputs": {
                    "output": "document"
                },
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 221.215421786192,
                "y": 94.91489477412404
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 429,
            "id": "recursiveCharacterTextSplitter_0",
            "position": {
                "x": -203.4868320229876,
                "y": 101.32475976329766
            },
            "type": "customNode",
            "data": {
                "id": "recursiveCharacterTextSplitter_0",
                "label": "Recursive Character Text Splitter",
                "version": 2,
                "name": "recursiveCharacterTextSplitter",
                "type": "RecursiveCharacterTextSplitter",
                "baseClasses": ["RecursiveCharacterTextSplitter", "TextSplitter", "BaseDocumentTransformer", "Runnable"],
                "category": "Text Splitters",
                "description": "Split documents recursively by different characters - starting with \"\\n\\n\", then \"\\n\", then \" \"",
                "inputParams": [
                    {
                        "label": "Chunk Size",
                        "name": "chunkSize",
                        "type": "number",
                        "default": 1000,
                        "optional": true,
                        "id": "recursiveCharacterTextSplitter_0-input-chunkSize-number"
                    },
                    {
                        "label": "Chunk Overlap",
                        "name": "chunkOverlap",
                        "type": "number",
                        "optional": true,
                        "id": "recursiveCharacterTextSplitter_0-input-chunkOverlap-number"
                    },
                    {
                        "label": "Custom Separators",
                        "name": "separators",
                        "type": "string",
                        "rows": 4,
                        "description": "Array of custom separators to determine when to split the text, will override the default separators",
                        "placeholder": "[\"|\", \"##\", \">\", \"-\"]",
                        "additionalParams": true,
                        "optional": true,
                        "id": "recursiveCharacterTextSplitter_0-input-separators-string"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "chunkSize": 1000,
                    "chunkOverlap": "",
                    "separators": ""
                },
                "outputAnchors": [
                    {
                        "id": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable",
                        "name": "recursiveCharacterTextSplitter",
                        "label": "RecursiveCharacterTextSplitter",
                        "type": "RecursiveCharacterTextSplitter | TextSplitter | BaseDocumentTransformer | Runnable"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": -203.4868320229876,
                "y": 101.32475976329766
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 334,
            "id": "openAIEmbedding_LlamaIndex_0",
            "position": {
                "x": 176.27434578083106,
                "y": 953.3664298122493
            },
            "type": "customNode",
            "data": {
                "id": "openAIEmbedding_LlamaIndex_0",
                "label": "OpenAI Embedding",
                "version": 1,
                "name": "openAIEmbedding_LlamaIndex",
                "type": "OpenAIEmbedding",
                "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"],
                "tags": ["LlamaIndex"],
                "category": "Embeddings",
                "description": "OpenAI Embedding specific for LlamaIndex",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "options",
                        "options": [
                            {
                                "label": "text-embedding-3-large",
                                "name": "text-embedding-3-large"
                            },
                            {
                                "label": "text-embedding-3-small",
                                "name": "text-embedding-3-small"
                            },
                            {
                                "label": "text-embedding-ada-002",
                                "name": "text-embedding-ada-002"
                            }
                        ],
                        "default": "text-embedding-ada-002",
                        "optional": true,
                        "id": "openAIEmbedding_LlamaIndex_0-input-modelName-options"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "timeout": "",
                    "basepath": ""
                },
                "outputAnchors": [
                    {
                        "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
                        "name": "openAIEmbedding_LlamaIndex",
                        "label": "OpenAIEmbedding",
                        "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 176.27434578083106,
                "y": 953.3664298122493
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 585,
            "id": "pineconeLlamaIndex_0",
            "position": {
                "x": 609.3087433345761,
                "y": 488.2141798951578
            },
            "type": "customNode",
            "data": {
                "id": "pineconeLlamaIndex_0",
                "label": "Pinecone",
                "version": 1,
                "name": "pineconeLlamaIndex",
                "type": "Pinecone",
                "baseClasses": ["Pinecone", "VectorIndexRetriever"],
                "tags": ["LlamaIndex"],
                "category": "Vector Stores",
                "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["pineconeApi"],
                        "id": "pineconeLlamaIndex_0-input-credential-credential"
                    },
                    {
                        "label": "Pinecone Index",
                        "name": "pineconeIndex",
                        "type": "string",
                        "id": "pineconeLlamaIndex_0-input-pineconeIndex-string"
                    },
                    {
                        "label": "Pinecone Namespace",
                        "name": "pineconeNamespace",
                        "type": "string",
                        "placeholder": "my-first-namespace",
                        "additionalParams": true,
                        "optional": true,
                        "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string"
                    },
                    {
                        "label": "Pinecone Metadata Filter",
                        "name": "pineconeMetadataFilter",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json"
                    },
                    {
                        "label": "Top K",
                        "name": "topK",
                        "description": "Number of top results to fetch. Defaults to 4",
                        "placeholder": "4",
                        "type": "number",
                        "additionalParams": true,
                        "optional": true,
                        "id": "pineconeLlamaIndex_0-input-topK-number"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Document",
                        "name": "document",
                        "type": "Document",
                        "list": true,
                        "optional": true,
                        "id": "pineconeLlamaIndex_0-input-document-Document"
                    },
                    {
                        "label": "Chat Model",
                        "name": "model",
                        "type": "BaseChatModel_LlamaIndex",
                        "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex"
                    },
                    {
                        "label": "Embeddings",
                        "name": "embeddings",
                        "type": "BaseEmbedding_LlamaIndex",
                        "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex"
                    }
                ],
                "inputs": {
                    "document": ["{{textFile_0.data.instance}}"],
                    "model": "{{chatOpenAI_LlamaIndex_1.data.instance}}",
                    "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}",
                    "pineconeIndex": "",
                    "pineconeNamespace": "",
                    "pineconeMetadataFilter": "",
                    "topK": ""
                },
                "outputAnchors": [
                    {
                        "name": "output",
                        "label": "Output",
                        "type": "options",
                        "options": [
                            {
                                "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever",
                                "name": "retriever",
                                "label": "Pinecone Retriever",
                                "type": "Pinecone | VectorIndexRetriever"
                            },
                            {
                                "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex",
                                "name": "vectorStore",
                                "label": "Pinecone Vector Store Index",
                                "type": "Pinecone | VectorStoreIndex"
                            }
                        ],
                        "default": "retriever"
                    }
                ],
                "outputs": {
                    "output": "retriever"
                },
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 609.3087433345761,
                "y": 488.2141798951578
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 529,
            "id": "chatOpenAI_LlamaIndex_1",
            "position": {
                "x": -195.15244974578656,
                "y": 584.9467028201428
            },
            "type": "customNode",
            "data": {
                "id": "chatOpenAI_LlamaIndex_1",
                "label": "ChatOpenAI",
                "version": 1,
                "name": "chatOpenAI_LlamaIndex",
                "type": "ChatOpenAI",
                "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"],
                "tags": ["LlamaIndex"],
                "category": "Chat Models",
                "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "chatOpenAI_LlamaIndex_1-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "options",
                        "options": [
                            {
                                "label": "gpt-4",
                                "name": "gpt-4"
                            },
                            {
                                "label": "gpt-4-turbo-preview",
                                "name": "gpt-4-turbo-preview"
                            },
                            {
                                "label": "gpt-4-0125-preview",
                                "name": "gpt-4-0125-preview"
                            },
                            {
                                "label": "gpt-4-1106-preview",
                                "name": "gpt-4-1106-preview"
                            },
                            {
                                "label": "gpt-4-vision-preview",
                                "name": "gpt-4-vision-preview"
                            },
                            {
                                "label": "gpt-4-0613",
                                "name": "gpt-4-0613"
                            },
                            {
                                "label": "gpt-4-32k",
                                "name": "gpt-4-32k"
                            },
                            {
                                "label": "gpt-4-32k-0613",
                                "name": "gpt-4-32k-0613"
                            },
                            {
                                "label": "gpt-3.5-turbo",
                                "name": "gpt-3.5-turbo"
                            },
                            {
                                "label": "gpt-3.5-turbo-1106",
                                "name": "gpt-3.5-turbo-1106"
                            },
                            {
                                "label": "gpt-3.5-turbo-0613",
                                "name": "gpt-3.5-turbo-0613"
                            },
                            {
                                "label": "gpt-3.5-turbo-16k",
                                "name": "gpt-3.5-turbo-16k"
                            },
                            {
                                "label": "gpt-3.5-turbo-16k-0613",
                                "name": "gpt-3.5-turbo-16k-0613"
                            }
                        ],
                        "default": "gpt-3.5-turbo",
                        "optional": true,
                        "id": "chatOpenAI_LlamaIndex_1-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "step": 0.1,
                        "default": 0.9,
                        "optional": true,
                        "id": "chatOpenAI_LlamaIndex_1-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_LlamaIndex_1-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_LlamaIndex_1-input-topP-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_LlamaIndex_1-input-timeout-number"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "modelName": "gpt-3.5-turbo-16k",
                    "temperature": 0.9,
                    "maxTokens": "",
                    "topP": "",
                    "timeout": ""
                },
                "outputAnchors": [
                    {
                        "id": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
                        "name": "chatOpenAI_LlamaIndex",
                        "label": "ChatOpenAI",
                        "type": "ChatOpenAI | BaseChatModel_LlamaIndex"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": -195.15244974578656,
                "y": 584.9467028201428
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 513,
            "id": "contextChatEngine_0",
            "position": {
                "x": 1550.2553933740128,
                "y": 270.7914631777829
            },
            "type": "customNode",
            "data": {
                "id": "contextChatEngine_0",
                "label": "Context Chat Engine",
                "version": 1,
                "name": "contextChatEngine",
                "type": "ContextChatEngine",
                "baseClasses": ["ContextChatEngine"],
                "tags": ["LlamaIndex"],
                "category": "Engine",
                "description": "Answer questions based on retrieved documents (context) with built-in memory to remember conversation",
                "inputParams": [
                    {
                        "label": "Return Source Documents",
                        "name": "returnSourceDocuments",
                        "type": "boolean",
                        "optional": true,
                        "id": "contextChatEngine_0-input-returnSourceDocuments-boolean"
                    },
                    {
                        "label": "System Message",
                        "name": "systemMessagePrompt",
                        "type": "string",
                        "rows": 4,
                        "optional": true,
                        "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.",
                        "id": "contextChatEngine_0-input-systemMessagePrompt-string"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Chat Model",
                        "name": "model",
                        "type": "BaseChatModel_LlamaIndex",
                        "id": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex"
                    },
                    {
                        "label": "Vector Store Retriever",
                        "name": "vectorStoreRetriever",
                        "type": "VectorIndexRetriever",
                        "id": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever"
                    },
                    {
                        "label": "Memory",
                        "name": "memory",
                        "type": "BaseChatMemory",
                        "id": "contextChatEngine_0-input-memory-BaseChatMemory"
                    }
                ],
                "inputs": {
                    "model": "{{chatOpenAI_LlamaIndex_2.data.instance}}",
                    "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}",
                    "memory": "{{RedisBackedChatMemory_0.data.instance}}",
                    "systemMessagePrompt": "",
                    "returnSourceDocuments": true
                },
                "outputAnchors": [
                    {
                        "id": "contextChatEngine_0-output-contextChatEngine-ContextChatEngine",
                        "name": "contextChatEngine",
                        "label": "ContextChatEngine",
                        "type": "ContextChatEngine"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1550.2553933740128,
                "y": 270.7914631777829
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 329,
            "id": "RedisBackedChatMemory_0",
            "position": {
                "x": 1081.252815805786,
                "y": 990.1701092562037
            },
            "type": "customNode",
            "data": {
                "id": "RedisBackedChatMemory_0",
                "label": "Redis-Backed Chat Memory",
                "version": 2,
                "name": "RedisBackedChatMemory",
                "type": "RedisBackedChatMemory",
                "baseClasses": ["RedisBackedChatMemory", "BaseChatMemory", "BaseMemory"],
                "category": "Memory",
                "description": "Summarizes the conversation and stores the memory in Redis server",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "optional": true,
                        "credentialNames": ["redisCacheApi", "redisCacheUrlApi"],
                        "id": "RedisBackedChatMemory_0-input-credential-credential"
                    },
                    {
                        "label": "Session Id",
                        "name": "sessionId",
                        "type": "string",
                        "description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat\">more</a>",
                        "default": "",
                        "additionalParams": true,
                        "optional": true,
                        "id": "RedisBackedChatMemory_0-input-sessionId-string"
                    },
                    {
                        "label": "Session Timeouts",
                        "name": "sessionTTL",
                        "type": "number",
                        "description": "Omit this parameter to make sessions never expire",
                        "additionalParams": true,
                        "optional": true,
                        "id": "RedisBackedChatMemory_0-input-sessionTTL-number"
                    },
                    {
                        "label": "Memory Key",
                        "name": "memoryKey",
                        "type": "string",
                        "default": "chat_history",
                        "additionalParams": true,
                        "id": "RedisBackedChatMemory_0-input-memoryKey-string"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "sessionId": "",
                    "sessionTTL": "",
                    "memoryKey": "chat_history"
                },
                "outputAnchors": [
                    {
                        "id": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory",
                        "name": "RedisBackedChatMemory",
                        "label": "RedisBackedChatMemory",
                        "type": "RedisBackedChatMemory | BaseChatMemory | BaseMemory"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "dragging": false,
            "positionAbsolute": {
                "x": 1081.252815805786,
                "y": 990.1701092562037
            }
        },
        {
            "width": 300,
            "height": 529,
            "id": "chatOpenAI_LlamaIndex_2",
            "position": {
                "x": 1015.1605888108386,
                "y": -38.31143117572401
            },
            "type": "customNode",
            "data": {
                "id": "chatOpenAI_LlamaIndex_2",
                "label": "ChatOpenAI",
                "version": 1,
                "name": "chatOpenAI_LlamaIndex",
                "type": "ChatOpenAI",
                "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"],
                "tags": ["LlamaIndex"],
                "category": "Chat Models",
                "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "chatOpenAI_LlamaIndex_2-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "options",
                        "options": [
                            {
                                "label": "gpt-4",
                                "name": "gpt-4"
                            },
                            {
                                "label": "gpt-4-turbo-preview",
                                "name": "gpt-4-turbo-preview"
                            },
                            {
                                "label": "gpt-4-0125-preview",
                                "name": "gpt-4-0125-preview"
                            },
                            {
                                "label": "gpt-4-1106-preview",
                                "name": "gpt-4-1106-preview"
                            },
                            {
                                "label": "gpt-4-vision-preview",
                                "name": "gpt-4-vision-preview"
                            },
                            {
                                "label": "gpt-4-0613",
                                "name": "gpt-4-0613"
                            },
                            {
                                "label": "gpt-4-32k",
                                "name": "gpt-4-32k"
                            },
                            {
                                "label": "gpt-4-32k-0613",
                                "name": "gpt-4-32k-0613"
                            },
                            {
                                "label": "gpt-3.5-turbo",
                                "name": "gpt-3.5-turbo"
                            },
                            {
                                "label": "gpt-3.5-turbo-1106",
                                "name": "gpt-3.5-turbo-1106"
                            },
                            {
                                "label": "gpt-3.5-turbo-0613",
                                "name": "gpt-3.5-turbo-0613"
                            },
                            {
                                "label": "gpt-3.5-turbo-16k",
                                "name": "gpt-3.5-turbo-16k"
                            },
                            {
                                "label": "gpt-3.5-turbo-16k-0613",
                                "name": "gpt-3.5-turbo-16k-0613"
                            }
                        ],
                        "default": "gpt-3.5-turbo",
                        "optional": true,
                        "id": "chatOpenAI_LlamaIndex_2-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "step": 0.1,
                        "default": 0.9,
                        "optional": true,
                        "id": "chatOpenAI_LlamaIndex_2-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_LlamaIndex_2-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_LlamaIndex_2-input-topP-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_LlamaIndex_2-input-timeout-number"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "modelName": "gpt-3.5-turbo",
                    "temperature": 0.9,
                    "maxTokens": "",
                    "topP": "",
                    "timeout": ""
                },
                "outputAnchors": [
                    {
                        "id": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
                        "name": "chatOpenAI_LlamaIndex",
                        "label": "ChatOpenAI",
                        "type": "ChatOpenAI | BaseChatModel_LlamaIndex"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1015.1605888108386,
                "y": -38.31143117572401
            },
            "dragging": false
        }
    ],
    "edges": [
        {
            "source": "recursiveCharacterTextSplitter_0",
            "sourceHandle": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable",
            "target": "textFile_0",
            "targetHandle": "textFile_0-input-textSplitter-TextSplitter",
            "type": "buttonedge",
            "id": "recursiveCharacterTextSplitter_0-recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable-textFile_0-textFile_0-input-textSplitter-TextSplitter",
            "data": {
                "label": ""
            }
        },
        {
            "source": "textFile_0",
            "sourceHandle": "textFile_0-output-document-Document",
            "target": "pineconeLlamaIndex_0",
            "targetHandle": "pineconeLlamaIndex_0-input-document-Document",
            "type": "buttonedge",
            "id": "textFile_0-textFile_0-output-document-Document-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-document-Document",
            "data": {
                "label": ""
            }
        },
        {
            "source": "chatOpenAI_LlamaIndex_1",
            "sourceHandle": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
            "target": "pineconeLlamaIndex_0",
            "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
            "type": "buttonedge",
            "id": "chatOpenAI_LlamaIndex_1-chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
            "data": {
                "label": ""
            }
        },
        {
            "source": "openAIEmbedding_LlamaIndex_0",
            "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
            "target": "pineconeLlamaIndex_0",
            "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
            "type": "buttonedge",
            "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
            "data": {
                "label": ""
            }
        },
        {
            "source": "pineconeLlamaIndex_0",
            "sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever",
            "target": "contextChatEngine_0",
            "targetHandle": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
            "type": "buttonedge",
            "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-contextChatEngine_0-contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
            "data": {
                "label": ""
            }
        },
        {
            "source": "RedisBackedChatMemory_0",
            "sourceHandle": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory",
            "target": "contextChatEngine_0",
            "targetHandle": "contextChatEngine_0-input-memory-BaseChatMemory",
            "type": "buttonedge",
            "id": "RedisBackedChatMemory_0-RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory-contextChatEngine_0-contextChatEngine_0-input-memory-BaseChatMemory",
            "data": {
                "label": ""
            }
        },
        {
            "source": "chatOpenAI_LlamaIndex_2",
            "sourceHandle": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
            "target": "contextChatEngine_0",
            "targetHandle": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex",
            "type": "buttonedge",
            "id": "chatOpenAI_LlamaIndex_2-chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-contextChatEngine_0-contextChatEngine_0-input-model-BaseChatModel_LlamaIndex",
            "data": {
                "label": ""
            }
        }
    ]
}

@@ -190,10 +190,10 @@
    "type": "options",
    "options": [
        {
            "id": "vectorStoreToDocument_0-output-document-Document",
            "id": "vectorStoreToDocument_0-output-document-Document|json",
            "name": "document",
            "label": "Document",
            "type": "Document"
            "type": "Document | json"
        },
        {
            "id": "vectorStoreToDocument_0-output-text-string|json",

@@ -0,0 +1,547 @@
{
    "description": "Stateless query engine designed to answer questions over your data using LlamaIndex",
    "badge": "NEW",
    "nodes": [
        {
            "width": 300,
            "height": 382,
            "id": "queryEngine_0",
            "position": {
                "x": 1407.9610494306783,
                "y": 241.12144405808692
            },
            "type": "customNode",
            "data": {
                "id": "queryEngine_0",
                "label": "Query Engine",
                "version": 1,
                "name": "queryEngine",
                "type": "QueryEngine",
                "baseClasses": ["QueryEngine"],
                "tags": ["LlamaIndex"],
                "category": "Engine",
                "description": "Simple query engine built to answer questions over your data, without memory",
                "inputParams": [
                    {
                        "label": "Return Source Documents",
                        "name": "returnSourceDocuments",
                        "type": "boolean",
                        "optional": true,
                        "id": "queryEngine_0-input-returnSourceDocuments-boolean"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Vector Store Retriever",
                        "name": "vectorStoreRetriever",
                        "type": "VectorIndexRetriever",
                        "id": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever"
                    },
                    {
                        "label": "Response Synthesizer",
                        "name": "responseSynthesizer",
                        "type": "ResponseSynthesizer",
                        "description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target=\"_blank\" href=\"https://ts.llamaindex.ai/modules/low_level/response_synthesizer\">more</a>",
                        "optional": true,
                        "id": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer"
                    }
                ],
                "inputs": {
                    "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}",
                    "responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}",
                    "returnSourceDocuments": true
                },
                "outputAnchors": [
                    {
                        "id": "queryEngine_0-output-queryEngine-QueryEngine",
                        "name": "queryEngine",
                        "label": "QueryEngine",
                        "type": "QueryEngine"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1407.9610494306783,
                "y": 241.12144405808692
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 585,
            "id": "pineconeLlamaIndex_0",
            "position": {
                "x": 977.3886641397302,
                "y": -261.2253031641797
            },
            "type": "customNode",
            "data": {
                "id": "pineconeLlamaIndex_0",
                "label": "Pinecone",
                "version": 1,
                "name": "pineconeLlamaIndex",
                "type": "Pinecone",
                "baseClasses": ["Pinecone", "VectorIndexRetriever"],
                "tags": ["LlamaIndex"],
                "category": "Vector Stores",
                "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["pineconeApi"],
                        "id": "pineconeLlamaIndex_0-input-credential-credential"
                    },
                    {
                        "label": "Pinecone Index",
                        "name": "pineconeIndex",
                        "type": "string",
                        "id": "pineconeLlamaIndex_0-input-pineconeIndex-string"
                    },
                    {
                        "label": "Pinecone Namespace",
                        "name": "pineconeNamespace",
                        "type": "string",
                        "placeholder": "my-first-namespace",
                        "additionalParams": true,
                        "optional": true,
                        "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string"
                    },
                    {
                        "label": "Pinecone Metadata Filter",
                        "name": "pineconeMetadataFilter",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json"
                    },
                    {
                        "label": "Top K",
                        "name": "topK",
                        "description": "Number of top results to fetch. Defaults to 4",
                        "placeholder": "4",
                        "type": "number",
                        "additionalParams": true,
                        "optional": true,
                        "id": "pineconeLlamaIndex_0-input-topK-number"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Document",
                        "name": "document",
                        "type": "Document",
                        "list": true,
                        "optional": true,
                        "id": "pineconeLlamaIndex_0-input-document-Document"
                    },
                    {
                        "label": "Chat Model",
                        "name": "model",
                        "type": "BaseChatModel_LlamaIndex",
                        "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex"
                    },
                    {
                        "label": "Embeddings",
                        "name": "embeddings",
                        "type": "BaseEmbedding_LlamaIndex",
                        "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex"
                    }
                ],
                "inputs": {
                    "document": "",
                    "model": "{{chatAnthropic_LlamaIndex_0.data.instance}}",
                    "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}",
                    "pineconeIndex": "",
                    "pineconeNamespace": "",
                    "pineconeMetadataFilter": "",
                    "topK": ""
                },
                "outputAnchors": [
                    {
                        "name": "output",
                        "label": "Output",
                        "type": "options",
                        "options": [
                            {
                                "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever",
                                "name": "retriever",
                                "label": "Pinecone Retriever",
                                "type": "Pinecone | VectorIndexRetriever"
                            },
                            {
                                "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex",
                                "name": "vectorStore",
                                "label": "Pinecone Vector Store Index",
                                "type": "Pinecone | VectorStoreIndex"
                            }
                        ],
                        "default": "retriever"
                    }
                ],
                "outputs": {
                    "output": "retriever"
                },
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 977.3886641397302,
                "y": -261.2253031641797
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 334,
            "id": "openAIEmbedding_LlamaIndex_0",
            "position": {
                "x": 529.8690713844503,
                "y": -18.955726653613254
            },
            "type": "customNode",
            "data": {
                "id": "openAIEmbedding_LlamaIndex_0",
                "label": "OpenAI Embedding",
                "version": 1,
                "name": "openAIEmbedding_LlamaIndex",
                "type": "OpenAIEmbedding",
                "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"],
                "tags": ["LlamaIndex"],
                "category": "Embeddings",
                "description": "OpenAI Embedding specific for LlamaIndex",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "options",
                        "options": [
                            {
                                "label": "text-embedding-3-large",
                                "name": "text-embedding-3-large"
                            },
                            {
                                "label": "text-embedding-3-small",
                                "name": "text-embedding-3-small"
                            },
                            {
                                "label": "text-embedding-ada-002",
                                "name": "text-embedding-ada-002"
                            }
                        ],
                        "default": "text-embedding-ada-002",
                        "optional": true,
                        "id": "openAIEmbedding_LlamaIndex_0-input-modelName-options"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "timeout": "",
                    "basepath": ""
                },
                "outputAnchors": [
                    {
                        "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
                        "name": "openAIEmbedding_LlamaIndex",
                        "label": "OpenAIEmbedding",
                        "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 529.8690713844503,
                "y": -18.955726653613254
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 749,
            "id": "compactrefineLlamaIndex_0",
            "position": {
                "x": 170.71031618977543,
                "y": -33.83233752386292
            },
            "type": "customNode",
            "data": {
                "id": "compactrefineLlamaIndex_0",
                "label": "Compact and Refine",
                "version": 1,
                "name": "compactrefineLlamaIndex",
                "type": "CompactRefine",
                "baseClasses": ["CompactRefine", "ResponseSynthesizer"],
                "tags": ["LlamaIndex"],
                "category": "Response Synthesizer",
                "description": "CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.",
                "inputParams": [
                    {
                        "label": "Refine Prompt",
                        "name": "refinePrompt",
                        "type": "string",
                        "rows": 4,
                        "default": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:",
                        "warning": "Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}",
                        "optional": true,
                        "id": "compactrefineLlamaIndex_0-input-refinePrompt-string"
                    },
                    {
                        "label": "Text QA Prompt",
                        "name": "textQAPrompt",
                        "type": "string",
                        "rows": 4,
                        "default": "Context information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:",
                        "warning": "Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}",
                        "optional": true,
                        "id": "compactrefineLlamaIndex_0-input-textQAPrompt-string"
|
||||
}
|
||||
],
|
||||
"inputAnchors": [],
|
||||
"inputs": {
|
||||
"refinePrompt": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:",
|
||||
"textQAPrompt": "Context information:\n<context>\n{context}\n</context>\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}"
|
||||
},
|
||||
"outputAnchors": [
|
||||
{
|
||||
"id": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer",
|
||||
"name": "compactrefineLlamaIndex",
|
||||
"label": "CompactRefine",
|
||||
"type": "CompactRefine | ResponseSynthesizer"
|
||||
}
|
||||
],
|
||||
"outputs": {},
|
||||
"selected": false
|
||||
},
|
||||
"selected": false,
|
||||
"positionAbsolute": {
|
||||
"x": 170.71031618977543,
|
||||
"y": -33.83233752386292
|
||||
},
|
||||
"dragging": false
|
||||
},
|
||||
{
|
||||
"width": 300,
|
||||
"height": 529,
|
||||
"id": "chatAnthropic_LlamaIndex_0",
|
||||
"position": {
|
||||
"x": 521.3530883359147,
|
||||
"y": -584.8241219614786
|
||||
},
|
||||
"type": "customNode",
|
||||
"data": {
|
||||
"id": "chatAnthropic_LlamaIndex_0",
|
||||
"label": "ChatAnthropic",
|
||||
"version": 1,
|
||||
"name": "chatAnthropic_LlamaIndex",
|
||||
"type": "ChatAnthropic",
|
||||
"baseClasses": ["ChatAnthropic", "BaseChatModel_LlamaIndex"],
|
||||
"tags": ["LlamaIndex"],
|
||||
"category": "Chat Models",
|
||||
"description": "Wrapper around ChatAnthropic LLM specific for LlamaIndex",
|
||||
"inputParams": [
|
||||
{
|
||||
"label": "Connect Credential",
|
||||
"name": "credential",
|
||||
"type": "credential",
|
||||
"credentialNames": ["anthropicApi"],
|
||||
"id": "chatAnthropic_LlamaIndex_0-input-credential-credential"
|
||||
},
|
||||
{
|
||||
"label": "Model Name",
|
||||
"name": "modelName",
|
||||
"type": "options",
|
||||
"options": [
|
||||
{
|
||||
"label": "claude-2",
|
||||
"name": "claude-2",
|
||||
"description": "Claude 2 latest major version, automatically get updates to the model as they are released"
|
||||
},
|
||||
{
|
||||
"label": "claude-2.1",
|
||||
"name": "claude-2.1",
|
||||
"description": "Claude 2 latest full version"
|
||||
},
|
||||
{
|
||||
"label": "claude-instant-1",
|
||||
"name": "claude-instant-1",
|
||||
"description": "Claude Instant latest major version, automatically get updates to the model as they are released"
|
||||
},
|
||||
{
|
||||
"label": "claude-v1",
|
||||
"name": "claude-v1"
|
||||
},
|
||||
{
|
||||
"label": "claude-v1-100k",
|
||||
"name": "claude-v1-100k"
|
||||
},
|
||||
{
|
||||
"label": "claude-v1.0",
|
||||
"name": "claude-v1.0"
|
||||
},
|
||||
{
|
||||
"label": "claude-v1.2",
|
||||
"name": "claude-v1.2"
|
||||
},
|
||||
{
|
||||
"label": "claude-v1.3",
|
||||
"name": "claude-v1.3"
|
||||
},
|
||||
{
|
||||
"label": "claude-v1.3-100k",
|
||||
"name": "claude-v1.3-100k"
|
||||
},
|
||||
{
|
||||
"label": "claude-instant-v1",
|
||||
"name": "claude-instant-v1"
|
||||
},
|
||||
{
|
||||
"label": "claude-instant-v1-100k",
|
||||
"name": "claude-instant-v1-100k"
|
||||
},
|
||||
{
|
||||
"label": "claude-instant-v1.0",
|
||||
"name": "claude-instant-v1.0"
|
||||
},
|
||||
{
|
||||
"label": "claude-instant-v1.1",
|
||||
"name": "claude-instant-v1.1"
|
||||
},
|
||||
{
|
||||
"label": "claude-instant-v1.1-100k",
|
||||
"name": "claude-instant-v1.1-100k"
|
||||
}
|
||||
],
|
||||
"default": "claude-2",
|
||||
"optional": true,
|
||||
"id": "chatAnthropic_LlamaIndex_0-input-modelName-options"
|
||||
},
|
||||
{
|
||||
"label": "Temperature",
|
||||
"name": "temperature",
|
||||
"type": "number",
|
||||
"step": 0.1,
|
||||
"default": 0.9,
|
||||
"optional": true,
|
||||
"id": "chatAnthropic_LlamaIndex_0-input-temperature-number"
|
||||
},
|
||||
{
|
||||
"label": "Max Tokens",
|
||||
"name": "maxTokensToSample",
|
||||
"type": "number",
|
||||
"step": 1,
|
||||
"optional": true,
|
||||
"additionalParams": true,
|
||||
"id": "chatAnthropic_LlamaIndex_0-input-maxTokensToSample-number"
|
||||
},
|
||||
{
|
||||
"label": "Top P",
|
||||
"name": "topP",
|
||||
"type": "number",
|
||||
"step": 0.1,
|
||||
"optional": true,
|
||||
"additionalParams": true,
|
||||
"id": "chatAnthropic_LlamaIndex_0-input-topP-number"
|
||||
}
|
||||
],
|
||||
"inputAnchors": [],
|
||||
"inputs": {
|
||||
"modelName": "claude-2",
|
||||
"temperature": 0.9,
|
||||
"maxTokensToSample": "",
|
||||
"topP": ""
|
||||
},
|
||||
"outputAnchors": [
|
||||
{
|
||||
"id": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex",
|
||||
"name": "chatAnthropic_LlamaIndex",
|
||||
"label": "ChatAnthropic",
|
||||
"type": "ChatAnthropic | BaseChatModel_LlamaIndex"
|
||||
}
|
||||
],
|
||||
"outputs": {},
|
||||
"selected": false
|
||||
},
|
||||
"selected": false,
|
||||
"positionAbsolute": {
|
||||
"x": 521.3530883359147,
|
||||
"y": -584.8241219614786
|
||||
},
|
||||
"dragging": false
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"source": "pineconeLlamaIndex_0",
|
||||
"sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever",
|
||||
"target": "queryEngine_0",
|
||||
"targetHandle": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
|
||||
"type": "buttonedge",
|
||||
"id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-queryEngine_0-queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
|
||||
"data": {
|
||||
"label": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"source": "openAIEmbedding_LlamaIndex_0",
|
||||
"sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
|
||||
"target": "pineconeLlamaIndex_0",
|
||||
"targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
|
||||
"type": "buttonedge",
|
||||
"id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
|
||||
"data": {
|
||||
"label": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"source": "compactrefineLlamaIndex_0",
|
||||
"sourceHandle": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer",
|
||||
"target": "queryEngine_0",
|
||||
"targetHandle": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer",
|
||||
"type": "buttonedge",
|
||||
"id": "compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-queryEngine_0-queryEngine_0-input-responseSynthesizer-ResponseSynthesizer",
|
||||
"data": {
|
||||
"label": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"source": "chatAnthropic_LlamaIndex_0",
|
||||
"sourceHandle": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex",
|
||||
"target": "pineconeLlamaIndex_0",
|
||||
"targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
|
||||
"type": "buttonedge",
|
||||
"id": "chatAnthropic_LlamaIndex_0-chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
|
||||
"data": {
|
||||
"label": ""
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@@ -0,0 +1,270 @@
{
    "description": "Simple chat engine to handle back-and-forth conversations using LlamaIndex",
    "badge": "NEW",
    "nodes": [
        {
            "width": 300,
            "height": 462,
            "id": "simpleChatEngine_0",
            "position": {
                "x": 1210.127368000538,
                "y": 324.98110560103896
            },
            "type": "customNode",
            "data": {
                "id": "simpleChatEngine_0",
                "label": "Simple Chat Engine",
                "version": 1,
                "name": "simpleChatEngine",
                "type": "SimpleChatEngine",
                "baseClasses": ["SimpleChatEngine"],
                "tags": ["LlamaIndex"],
                "category": "Engine",
                "description": "Simple engine to handle back-and-forth conversations",
                "inputParams": [
                    {
                        "label": "System Message",
                        "name": "systemMessagePrompt",
                        "type": "string",
                        "rows": 4,
                        "optional": true,
                        "placeholder": "You are a helpful assistant",
                        "id": "simpleChatEngine_0-input-systemMessagePrompt-string"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Chat Model",
                        "name": "model",
                        "type": "BaseChatModel_LlamaIndex",
                        "id": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex"
                    },
                    {
                        "label": "Memory",
                        "name": "memory",
                        "type": "BaseChatMemory",
                        "id": "simpleChatEngine_0-input-memory-BaseChatMemory"
                    }
                ],
                "inputs": {
                    "model": "{{azureChatOpenAI_LlamaIndex_0.data.instance}}",
                    "memory": "{{bufferMemory_0.data.instance}}",
                    "systemMessagePrompt": "You are a helpful assistant."
                },
                "outputAnchors": [
                    {
                        "id": "simpleChatEngine_0-output-simpleChatEngine-SimpleChatEngine",
                        "name": "simpleChatEngine",
                        "label": "SimpleChatEngine",
                        "type": "SimpleChatEngine"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "dragging": false,
            "positionAbsolute": {
                "x": 1210.127368000538,
                "y": 324.98110560103896
            }
        },
        {
            "width": 300,
            "height": 376,
            "id": "bufferMemory_0",
            "position": {
                "x": 393.9823478014782,
                "y": 415.7414943210391
            },
            "type": "customNode",
            "data": {
                "id": "bufferMemory_0",
                "label": "Buffer Memory",
                "version": 1,
                "name": "bufferMemory",
                "type": "BufferMemory",
                "baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"],
                "category": "Memory",
                "description": "Remembers previous conversational back-and-forths directly",
                "inputParams": [
                    {
                        "label": "Memory Key",
                        "name": "memoryKey",
                        "type": "string",
                        "default": "chat_history",
                        "id": "bufferMemory_0-input-memoryKey-string"
                    },
                    {
                        "label": "Input Key",
                        "name": "inputKey",
                        "type": "string",
                        "default": "input",
                        "id": "bufferMemory_0-input-inputKey-string"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "memoryKey": "chat_history",
                    "inputKey": "input"
                },
                "outputAnchors": [
                    {
                        "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
                        "name": "bufferMemory",
                        "label": "BufferMemory",
                        "type": "BufferMemory | BaseChatMemory | BaseMemory"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 393.9823478014782,
                "y": 415.7414943210391
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 529,
            "id": "azureChatOpenAI_LlamaIndex_0",
            "position": {
                "x": 746.5530862509605,
                "y": -54.107978373323306
            },
            "type": "customNode",
            "data": {
                "id": "azureChatOpenAI_LlamaIndex_0",
                "label": "AzureChatOpenAI",
                "version": 1,
                "name": "azureChatOpenAI_LlamaIndex",
                "type": "AzureChatOpenAI",
                "baseClasses": ["AzureChatOpenAI", "BaseChatModel_LlamaIndex"],
                "tags": ["LlamaIndex"],
                "category": "Chat Models",
                "description": "Wrapper around Azure OpenAI Chat LLM specific for LlamaIndex",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["azureOpenAIApi"],
                        "id": "azureChatOpenAI_LlamaIndex_0-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "options",
                        "options": [
                            {
                                "label": "gpt-4",
                                "name": "gpt-4"
                            },
                            {
                                "label": "gpt-4-32k",
                                "name": "gpt-4-32k"
                            },
                            {
                                "label": "gpt-3.5-turbo",
                                "name": "gpt-3.5-turbo"
                            },
                            {
                                "label": "gpt-3.5-turbo-16k",
                                "name": "gpt-3.5-turbo-16k"
                            }
                        ],
                        "default": "gpt-3.5-turbo-16k",
                        "optional": true,
                        "id": "azureChatOpenAI_LlamaIndex_0-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "step": 0.1,
                        "default": 0.9,
                        "optional": true,
                        "id": "azureChatOpenAI_LlamaIndex_0-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "azureChatOpenAI_LlamaIndex_0-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "azureChatOpenAI_LlamaIndex_0-input-topP-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "azureChatOpenAI_LlamaIndex_0-input-timeout-number"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "modelName": "gpt-3.5-turbo-16k",
                    "temperature": 0.9,
                    "maxTokens": "",
                    "topP": "",
                    "timeout": ""
                },
                "outputAnchors": [
                    {
                        "id": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex",
                        "name": "azureChatOpenAI_LlamaIndex",
                        "label": "AzureChatOpenAI",
                        "type": "AzureChatOpenAI | BaseChatModel_LlamaIndex"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 746.5530862509605,
                "y": -54.107978373323306
            },
            "dragging": false
        }
    ],
    "edges": [
        {
            "source": "bufferMemory_0",
            "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
            "target": "simpleChatEngine_0",
            "targetHandle": "simpleChatEngine_0-input-memory-BaseChatMemory",
            "type": "buttonedge",
            "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-simpleChatEngine_0-simpleChatEngine_0-input-memory-BaseChatMemory",
            "data": {
                "label": ""
            }
        },
        {
            "source": "azureChatOpenAI_LlamaIndex_0",
            "sourceHandle": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex",
            "target": "simpleChatEngine_0",
            "targetHandle": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex",
            "type": "buttonedge",
            "id": "azureChatOpenAI_LlamaIndex_0-azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex-simpleChatEngine_0-simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex",
            "data": {
                "label": ""
            }
        }
    ]
}
@@ -1,6 +1,6 @@
{
    "name": "flowise",
    "version": "1.4.11",
    "version": "1.5.0",
    "description": "Flowiseai Server",
    "main": "dist/index",
    "types": "dist/index.d.ts",
@@ -40,7 +40,8 @@ export const init = async (): Promise<void> => {
                synchronize: false,
                migrationsRun: false,
                entities: Object.values(entities),
                migrations: mysqlMigrations
                migrations: mysqlMigrations,
                ssl: getDatabaseSSLFromEnv()
            })
            break
        case 'postgres':
@@ -51,18 +52,7 @@ export const init = async (): Promise<void> => {
                username: process.env.DATABASE_USER,
                password: process.env.DATABASE_PASSWORD,
                database: process.env.DATABASE_NAME,
                ...(process.env.DATABASE_SSL_KEY_BASE64
                    ? {
                          ssl: {
                              rejectUnauthorized: false,
                              cert: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64')
                          }
                      }
                    : process.env.DATABASE_SSL === 'true'
                    ? {
                          ssl: true
                      }
                    : {}),
                ssl: getDatabaseSSLFromEnv(),
                synchronize: false,
                migrationsRun: false,
                entities: Object.values(entities),
@@ -89,3 +79,15 @@ export function getDataSource(): DataSource {
    }
    return appDataSource
}

const getDatabaseSSLFromEnv = () => {
    if (process.env.DATABASE_SSL_KEY_BASE64) {
        return {
            rejectUnauthorized: false,
            ca: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64')
        }
    } else if (process.env.DATABASE_SSL === 'true') {
        return true
    }
    return undefined
}
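Note on the new helper: a base64-encoded CA certificate in `DATABASE_SSL_KEY_BASE64` takes precedence over the plain `DATABASE_SSL` boolean, and `undefined` effectively omits the `ssl` key. A minimal sketch of how the returned value would be consumed by a TypeORM `DataSource` (the connection options here are abbreviated and illustrative, not the full Flowise config):

```typescript
import { DataSource } from 'typeorm'

// Same precedence as getDatabaseSSLFromEnv() above: cert material wins over
// the DATABASE_SSL boolean flag; otherwise SSL is left off entirely.
const ssl = process.env.DATABASE_SSL_KEY_BASE64
    ? { rejectUnauthorized: false, ca: Buffer.from(process.env.DATABASE_SSL_KEY_BASE64, 'base64') }
    : process.env.DATABASE_SSL === 'true'
    ? true
    : undefined

// TypeORM passes `ssl` through to the underlying driver, which accepts
// either a boolean or a TLS options object; `undefined` simply omits it.
const dataSource = new DataSource({
    type: 'postgres',
    host: process.env.DATABASE_HOST,
    port: parseInt(process.env.DATABASE_PORT || '5432', 10),
    username: process.env.DATABASE_USER,
    password: process.env.DATABASE_PASSWORD,
    database: process.env.DATABASE_NAME,
    ssl
})
```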
@@ -482,7 +482,12 @@ export class App {
                const isEndingNode = endingNodeData?.outputs?.output === 'EndingNode'

                if (!isEndingNode) {
                    if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents') {
                    if (
                        endingNodeData &&
                        endingNodeData.category !== 'Chains' &&
                        endingNodeData.category !== 'Agents' &&
                        endingNodeData.category !== 'Engine'
                    ) {
                        return res.status(500).send(`Ending node must be either a Chain, Agent or Engine`)
                    }
                }
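This validation now also accepts LlamaIndex `Engine` nodes as flow endpoints. A hypothetical condensed restatement of the rule (helper names are illustrative, not the Flowise API):

```typescript
// A flow endpoint is valid if it is an explicit EndingNode output, or if
// its category is one of the accepted terminal categories.
const TERMINAL_CATEGORIES = ['Chains', 'Agents', 'Engine']

const isValidEndingNode = (outputs: { output?: string } | undefined, category: string | undefined): boolean =>
    outputs?.output === 'EndingNode' || (!!category && TERMINAL_CATEGORIES.includes(category))
```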
@@ -1440,7 +1445,7 @@ export class App {
                        chatType,
                        chatId,
                        memoryType: memoryType ?? (chatId ? IsNull() : undefined),
                        sessionId: sessionId ?? (chatId ? IsNull() : undefined),
                        sessionId: sessionId ?? undefined,
                        createdDate: toDate && fromDate ? Between(fromDate, toDate) : undefined
                    },
                    order: {
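The behavioural difference between the removed and added `sessionId` lines comes down to TypeORM `find` semantics: `IsNull()` is an operator that matches rows whose column IS NULL, while a property left as `undefined` is dropped from the filter entirely. A small sketch of the distinction (the entity shape below is illustrative):

```typescript
import { Between, FindOptionsWhere, IsNull } from 'typeorm'

interface ChatMessage {
    chatflowid: string
    sessionId?: string
    createdDate: Date
}

// Old behaviour: with a chatId but no sessionId, only rows whose
// sessionId column IS NULL matched.
const oldWhere = (chatId?: string, sessionId?: string): FindOptionsWhere<ChatMessage> => ({
    sessionId: sessionId ?? (chatId ? IsNull() : undefined)
})

// New behaviour: an undefined property is omitted from the filter, so
// messages from every session are returned when no sessionId is supplied.
const newWhere = (sessionId?: string, fromDate?: Date, toDate?: Date): FindOptionsWhere<ChatMessage> => ({
    sessionId: sessionId ?? undefined,
    createdDate: fromDate && toDate ? Between(fromDate, toDate) : undefined
})
```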
@@ -1690,7 +1695,12 @@ export class App {
                const isEndingNode = endingNodeData?.outputs?.output === 'EndingNode'

                if (!isEndingNode) {
                    if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents') {
                    if (
                        endingNodeData &&
                        endingNodeData.category !== 'Chains' &&
                        endingNodeData.category !== 'Agents' &&
                        endingNodeData.category !== 'Engine'
                    ) {
                        return res.status(500).send(`Ending node must be either a Chain, Agent or Engine`)
                    }
@@ -818,7 +818,16 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
 */
export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => {
    const streamAvailableLLMs = {
        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock', 'chatMistralAI'],
        'Chat Models': [
            'azureChatOpenAI',
            'chatOpenAI',
            'chatOpenAI_LlamaIndex',
            'chatAnthropic',
            'chatAnthropic_LlamaIndex',
            'chatOllama',
            'awsChatBedrock',
            'chatMistralAI'
        ],
        LLMs: ['azureOpenAI', 'openAI', 'ollama']
    }
@@ -841,6 +850,9 @@ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNod
        // Agents that are available to stream
        const whitelistAgents = ['openAIFunctionAgent', 'csvAgent', 'airtableAgent', 'conversationalRetrievalAgent']
        isValidChainOrAgent = whitelistAgents.includes(endingNodeData.name)
    } else if (endingNodeData.category === 'Engine') {
        const whitelistEngine = ['contextChatEngine', 'simpleChatEngine', 'queryEngine', 'subQuestionQueryEngine']
        isValidChainOrAgent = whitelistEngine.includes(endingNodeData.name)
    }

    // If no output parser, flow is available to stream
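Streaming eligibility is therefore a two-part test: the ending node must be a whitelisted chain, agent, or engine, and the attached model must appear in `streamAvailableLLMs` for its category. A hypothetical, simplified sketch of that composition (it omits the chain-specific and output-parser checks the real function performs):

```typescript
// Simplified sketch: both conditions must hold for the flow to stream.
const WHITELISTS: Record<string, string[]> = {
    Agents: ['openAIFunctionAgent', 'csvAgent', 'airtableAgent', 'conversationalRetrievalAgent'],
    Engine: ['contextChatEngine', 'simpleChatEngine', 'queryEngine', 'subQuestionQueryEngine']
}

const canStream = (
    endingNode: { category: string; name: string },
    model: { category: string; name: string },
    streamAvailableLLMs: Record<string, string[]>
): boolean => {
    const validEnding =
        endingNode.category === 'Chains' || (WHITELISTS[endingNode.category] ?? []).includes(endingNode.name)
    const validModel = (streamAvailableLLMs[model.category] ?? []).includes(model.name)
    return validEnding && validModel
}
```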
@@ -1,6 +1,6 @@
{
    "name": "flowise-ui",
    "version": "1.4.8",
    "version": "1.5.0",
    "license": "SEE LICENSE IN LICENSE.md",
    "homepage": "https://flowiseai.com",
    "author": {

After Width: | Height: | Size: 28 KiB
@@ -132,6 +132,35 @@ const NodeInfoDialog = ({ show, dialogProps, onCancel }) => {
                            </span>
                        </div>
                    )}
                    {dialogProps.data.tags &&
                        dialogProps.data.tags.length > 0 &&
                        dialogProps.data.tags.map((tag, index) => (
                            <div
                                style={{
                                    display: 'flex',
                                    flexDirection: 'row',
                                    width: 'max-content',
                                    borderRadius: 15,
                                    background: '#cae9ff',
                                    padding: 5,
                                    paddingLeft: 10,
                                    paddingRight: 10,
                                    marginTop: 5,
                                    marginLeft: 10,
                                    marginBottom: 5
                                }}
                                key={index}
                            >
                                <span
                                    style={{
                                        color: '#023e7d',
                                        fontSize: '0.825rem'
                                    }}
                                >
                                    {tag.toLowerCase()}
                                </span>
                            </div>
                        ))}
                </div>
            </div>
        </div>
@@ -277,6 +277,7 @@ export const generateExportFlowData = (flowData) => {
            name: node.data.name,
            type: node.data.type,
            baseClasses: node.data.baseClasses,
            tags: node.data.tags,
            category: node.data.category,
            description: node.data.description,
            inputParams: node.data.inputParams,
@@ -22,7 +22,9 @@ import {
    Popper,
    Stack,
    Typography,
    Chip
    Chip,
    Tab,
    Tabs
} from '@mui/material'
import ExpandMoreIcon from '@mui/icons-material/ExpandMore'
@@ -36,12 +38,20 @@ import { StyledFab } from 'ui-component/button/StyledFab'

// icons
import { IconPlus, IconSearch, IconMinus, IconX } from '@tabler/icons'
import LlamaindexPNG from 'assets/images/llamaindex.png'
import LangChainPNG from 'assets/images/langchain.png'

// const
import { baseURL } from 'store/constant'
import { SET_COMPONENT_NODES } from 'store/actions'

// ==============================|| ADD NODES ||============================== //

function a11yProps(index) {
    return {
        id: `attachment-tab-${index}`,
        'aria-controls': `attachment-tabpanel-${index}`
    }
}

const AddNodes = ({ nodesData, node }) => {
    const theme = useTheme()
@@ -52,6 +62,7 @@ const AddNodes = ({ nodesData, node }) => {
    const [nodes, setNodes] = useState({})
    const [open, setOpen] = useState(false)
    const [categoryExpanded, setCategoryExpanded] = useState({})
    const [tabValue, setTabValue] = useState(0)

    const anchorRef = useRef(null)
    const prevOpen = useRef(open)
@@ -86,6 +97,11 @@ const AddNodes = ({ nodesData, node }) => {
        }
    }

    const handleTabChange = (event, newValue) => {
        setTabValue(newValue)
        filterSearch(searchValue, newValue)
    }

    const getSearchedNodes = (value) => {
        const passed = nodesData.filter((nd) => {
            const passesQuery = nd.name.toLowerCase().includes(value.toLowerCase())
@@ -95,23 +111,34 @@ const AddNodes = ({ nodesData, node }) => {
        return passed
    }

    const filterSearch = (value) => {
    const filterSearch = (value, newTabValue) => {
        setSearchValue(value)
        setTimeout(() => {
            if (value) {
                const returnData = getSearchedNodes(value)
                groupByCategory(returnData, true)
                groupByCategory(returnData, newTabValue ?? tabValue, true)
                scrollTop()
            } else if (value === '') {
                groupByCategory(nodesData)
                groupByCategory(nodesData, newTabValue ?? tabValue)
                scrollTop()
            }
        }, 500)
    }

    const groupByCategory = (nodes, isFilter) => {
    const groupByTags = (nodes, newTabValue = 0) => {
        const langchainNodes = nodes.filter((nd) => !nd.tags)
        const llmaindexNodes = nodes.filter((nd) => nd.tags && nd.tags.includes('LlamaIndex'))
        if (newTabValue === 0) {
            return langchainNodes
        } else {
            return llmaindexNodes
        }
    }

    const groupByCategory = (nodes, newTabValue, isFilter) => {
        const taggedNodes = groupByTags(nodes, newTabValue)
        const accordianCategories = {}
        const result = nodes.reduce(function (r, a) {
        const result = taggedNodes.reduce(function (r, a) {
            r[a.category] = r[a.category] || []
            r[a.category].push(a)
            accordianCategories[a.category] = isFilter ? true : false
@@ -244,15 +271,72 @@ const AddNodes = ({ nodesData, node }) => {
                                    'aria-label': 'weight'
                                }}
                            />
                            <Tabs
                                sx={{ position: 'relative', minHeight: '50px', height: '50px' }}
                                variant='fullWidth'
                                value={tabValue}
                                onChange={handleTabChange}
                                aria-label='tabs'
                            >
                                {['LangChain', 'LlamaIndex'].map((item, index) => (
                                    <Tab
                                        icon={
                                            <div
                                                style={{
                                                    borderRadius: '50%'
                                                }}
                                            >
                                                <img
                                                    style={{
                                                        width: '25px',
                                                        height: '25px',
                                                        borderRadius: '50%',
                                                        objectFit: 'contain'
                                                    }}
                                                    src={index === 0 ? LangChainPNG : LlamaindexPNG}
                                                    alt={item}
                                                />
                                            </div>
                                        }
                                        iconPosition='start'
                                        sx={{ minHeight: '50px', height: '50px' }}
                                        key={index}
                                        label={item}
                                        {...a11yProps(index)}
                                    ></Tab>
                                ))}
                                <div
                                    style={{
                                        display: 'flex',
                                        flexDirection: 'row',
                                        alignItems: 'center',
                                        borderRadius: 10,
                                        background: 'rgb(254,252,191)',
                                        paddingLeft: 6,
                                        paddingRight: 6,
                                        paddingTop: 1,
                                        paddingBottom: 1,
                                        width: 'max-content',
                                        position: 'absolute',
                                        top: 0,
                                        right: 0,
                                        fontSize: '0.65rem',
                                        fontWeight: 700
                                    }}
                                >
                                    <span style={{ color: 'rgb(116,66,16)' }}>BETA</span>
                                </div>
                            </Tabs>

                            <Divider />
                        </Box>
                        <PerfectScrollbar
                            containerRef={(el) => {
                                ps.current = el
                            }}
                            style={{ height: '100%', maxHeight: 'calc(100vh - 320px)', overflowX: 'hidden' }}
                            style={{ height: '100%', maxHeight: 'calc(100vh - 380px)', overflowX: 'hidden' }}
                        >
                            <Box sx={{ p: 2 }}>
                            <Box sx={{ p: 2, pt: 0 }}>
                                <List
                                    sx={{
                                        width: '100%',
@@ -19,6 +19,7 @@ import NodeInfoDialog from 'ui-component/dialog/NodeInfoDialog'
import { baseURL } from 'store/constant'
import { IconTrash, IconCopy, IconInfoCircle, IconAlertTriangle } from '@tabler/icons'
import { flowContext } from 'store/context/ReactFlowContext'
import LlamaindexPNG from 'assets/images/llamaindex.png'

// ===========================|| CANVAS NODE ||=========================== //
@@ -158,9 +159,25 @@ const CanvasNode = ({ data }) => {
                            {data.label}
                        </Typography>
                    </Box>
                    <div style={{ flexGrow: 1 }}></div>
                    {data.tags && data.tags.includes('LlamaIndex') && (
                        <>
                            <div
                                style={{
                                    borderRadius: '50%',
                                    padding: 15
                                }}
                            >
                                <img
                                    style={{ width: '25px', height: '25px', borderRadius: '50%', objectFit: 'contain' }}
                                    src={LlamaindexPNG}
                                    alt='LlamaIndex'
                                />
                            </div>
                        </>
                    )}
                    {warningMessage && (
                        <>
                            <div style={{ flexGrow: 1 }}></div>
                            <Tooltip title={<span style={{ whiteSpace: 'pre-line' }}>{warningMessage}</span>} placement='top'>
                                <IconButton sx={{ height: 35, width: 35 }}>
                                    <IconAlertTriangle size={35} color='orange' />
@@ -13,6 +13,7 @@ import AdditionalParamsDialog from 'ui-component/dialog/AdditionalParamsDialog'

// const
import { baseURL } from 'store/constant'
import LlamaindexPNG from 'assets/images/llamaindex.png'

const CardWrapper = styled(MainCard)(({ theme }) => ({
    background: theme.palette.card.main,
@@ -87,6 +88,23 @@ const MarketplaceCanvasNode = ({ data }) => {
                        {data.label}
                    </Typography>
                </Box>
                <div style={{ flexGrow: 1 }}></div>
                {data.tags && data.tags.includes('LlamaIndex') && (
                    <>
                        <div
                            style={{
                                borderRadius: '50%',
                                padding: 15
                            }}
                        >
                            <img
                                style={{ width: '25px', height: '25px', borderRadius: '50%', objectFit: 'contain' }}
                                src={LlamaindexPNG}
                                alt='LlamaIndex'
                            />
                        </div>
                    </>
                )}
            </div>
            {(data.inputAnchors.length > 0 || data.inputParams.length > 0) && (
                <>