diff --git a/package.json b/package.json
index 210a95c49..e123cc681 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "flowise",
- "version": "1.3.6",
+ "version": "1.3.7",
"private": true,
"homepage": "https://flowiseai.com",
"workspaces": [
diff --git a/packages/components/credentials/AWSCredential.credential.ts b/packages/components/credentials/AWSCredential.credential.ts
new file mode 100644
index 000000000..3c9dd3a61
--- /dev/null
+++ b/packages/components/credentials/AWSCredential.credential.ts
@@ -0,0 +1,47 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class AWSApi implements INodeCredential {
+ label: string
+ name: string
+ version: number
+ description: string
+ optional: boolean
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'AWS security credentials'
+ this.name = 'awsApi'
+ this.version = 1.0
+ this.description =
+ 'Your AWS security credentials. When unspecified, credentials will be sourced from the runtime environment according to the default AWS SDK behavior.'
+ this.optional = true
+ this.inputs = [
+ {
+ label: 'AWS Access Key',
+ name: 'awsKey',
+ type: 'string',
+ placeholder: '',
+ description: 'The access key for your AWS account.',
+ optional: true
+ },
+ {
+ label: 'AWS Secret Access Key',
+ name: 'awsSecret',
+ type: 'password',
+ placeholder: '',
+ description: 'The secret key for your AWS account.',
+ optional: true
+ },
+ {
+ label: 'AWS Session Key',
+ name: 'awsSession',
+ type: 'password',
+ placeholder: '',
+ description: 'The session key for your AWS account. This is only needed when you are using temporary credentials.',
+ optional: true
+ }
+ ]
+ }
+}
+
+module.exports = { credClass: AWSApi }
diff --git a/packages/components/credentials/MomentoCacheApi.credential.ts b/packages/components/credentials/MomentoCacheApi.credential.ts
new file mode 100644
index 000000000..038f826d9
--- /dev/null
+++ b/packages/components/credentials/MomentoCacheApi.credential.ts
@@ -0,0 +1,36 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class MomentoCacheApi implements INodeCredential {
+ label: string
+ name: string
+ version: number
+ description: string
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'Momento Cache API'
+ this.name = 'momentoCacheApi'
+ this.version = 1.0
+ this.description =
+ 'Refer to the official guide on how to get an API key on Momento'
+ this.inputs = [
+ {
+ label: 'Cache',
+ name: 'momentoCache',
+ type: 'string'
+ },
+ {
+ label: 'API Key',
+ name: 'momentoApiKey',
+ type: 'password'
+ },
+ {
+ label: 'Endpoint',
+ name: 'momentoEndpoint',
+ type: 'string'
+ }
+ ]
+ }
+}
+
+module.exports = { credClass: MomentoCacheApi }
diff --git a/packages/components/credentials/RedisCacheApi.credential.ts b/packages/components/credentials/RedisCacheApi.credential.ts
new file mode 100644
index 000000000..e09a94e7a
--- /dev/null
+++ b/packages/components/credentials/RedisCacheApi.credential.ts
@@ -0,0 +1,43 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class RedisCacheApi implements INodeCredential {
+ label: string
+ name: string
+ version: number
+ description: string
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'Redis Cache API'
+ this.name = 'redisCacheApi'
+ this.version = 1.0
+ this.inputs = [
+ {
+ label: 'Redis Host',
+ name: 'redisCacheHost',
+ type: 'string',
+ default: '127.0.0.1'
+ },
+ {
+ label: 'Port',
+ name: 'redisCachePort',
+ type: 'number',
+ default: '6379'
+ },
+ {
+ label: 'User',
+ name: 'redisCacheUser',
+ type: 'string',
+ placeholder: ''
+ },
+ {
+ label: 'Password',
+ name: 'redisCachePwd',
+ type: 'password',
+ placeholder: ''
+ }
+ ]
+ }
+}
+
+module.exports = { credClass: RedisCacheApi }
diff --git a/packages/components/credentials/UpstashRedisApi.credential.ts b/packages/components/credentials/UpstashRedisApi.credential.ts
new file mode 100644
index 000000000..b6e62ff35
--- /dev/null
+++ b/packages/components/credentials/UpstashRedisApi.credential.ts
@@ -0,0 +1,28 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class UpstashRedisApi implements INodeCredential {
+ label: string
+ name: string
+ version: number
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'Upstash Redis API'
+ this.name = 'upstashRedisApi'
+ this.version = 1.0
+ this.inputs = [
+ {
+ label: 'Upstash Redis REST URL',
+ name: 'upstashConnectionUrl',
+ type: 'string'
+ },
+ {
+ label: 'Token',
+ name: 'upstashConnectionToken',
+ type: 'password'
+ }
+ ]
+ }
+}
+
+module.exports = { credClass: UpstashRedisApi }
diff --git a/packages/components/credentials/UpstashRedisMemoryApi.credential.ts b/packages/components/credentials/UpstashRedisMemoryApi.credential.ts
new file mode 100644
index 000000000..8d3e95286
--- /dev/null
+++ b/packages/components/credentials/UpstashRedisMemoryApi.credential.ts
@@ -0,0 +1,26 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class UpstashRedisMemoryApi implements INodeCredential {
+ label: string
+ name: string
+ version: number
+ description: string
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'Upstash Redis Memory API'
+ this.name = 'upstashRedisMemoryApi'
+ this.version = 1.0
+ this.description =
+ 'Refer to the official guide on how to create a Redis instance and get the Redis REST token'
+ this.inputs = [
+ {
+ label: 'Upstash Redis REST Token',
+ name: 'upstashRestToken',
+ type: 'password'
+ }
+ ]
+ }
+}
+
+module.exports = { credClass: UpstashRedisMemoryApi }
diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
index 661ef151d..00f825d44 100644
--- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
+++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
@@ -95,8 +95,12 @@ class ConversationalAgent_Agents implements INode {
const callbacks = await additionalCallbacks(nodeData, options)
if (options && options.chatHistory) {
- memory.chatHistory = mapChatHistory(options)
- executor.memory = memory
+ const chatHistoryClassName = memory.chatHistory.constructor.name
+ // Only replace when the chat history is the in-memory ChatMessageHistory; external memory stores manage their own history
+ if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
+ memory.chatHistory = mapChatHistory(options)
+ executor.memory = memory
+ }
}
const result = await executor.call({ input }, [...callbacks])
diff --git a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
index 3d70a2d32..4a908d7fe 100644
--- a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
+++ b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
@@ -82,7 +82,11 @@ class ConversationalRetrievalAgent_Agents implements INode {
if (executor.memory) {
;(executor.memory as any).memoryKey = 'chat_history'
;(executor.memory as any).outputKey = 'output'
- ;(executor.memory as any).chatHistory = mapChatHistory(options)
+ const chatHistoryClassName = (executor.memory as any).chatHistory.constructor.name
+ // Only replace when the chat history is the in-memory ChatMessageHistory; external memory stores manage their own history
+ if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
+ ;(executor.memory as any).chatHistory = mapChatHistory(options)
+ }
}
const loggerHandler = new ConsoleCallbackHandler(options.logger)
diff --git a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
index c1bd32ec5..c920c399e 100644
--- a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
+++ b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
@@ -81,8 +81,12 @@ class OpenAIFunctionAgent_Agents implements INode {
const memory = nodeData.inputs?.memory as BaseChatMemory
if (options && options.chatHistory) {
- memory.chatHistory = mapChatHistory(options)
- executor.memory = memory
+ const chatHistoryClassName = memory.chatHistory.constructor.name
+ // Only replace when the chat history is the in-memory ChatMessageHistory; external memory stores manage their own history
+ if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
+ memory.chatHistory = mapChatHistory(options)
+ executor.memory = memory
+ }
}
const loggerHandler = new ConsoleCallbackHandler(options.logger)
diff --git a/packages/components/nodes/cache/MomentoCache/MomentoCache.ts b/packages/components/nodes/cache/MomentoCache/MomentoCache.ts
new file mode 100644
index 000000000..9aa82e822
--- /dev/null
+++ b/packages/components/nodes/cache/MomentoCache/MomentoCache.ts
@@ -0,0 +1,57 @@
+import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
+import { MomentoCache as LangchainMomentoCache } from 'langchain/cache/momento'
+import { CacheClient, Configurations, CredentialProvider } from '@gomomento/sdk'
+
+class MomentoCache implements INode {
+ label: string
+ name: string
+ version: number
+ description: string
+ type: string
+ icon: string
+ category: string
+ baseClasses: string[]
+ inputs: INodeParams[]
+ credential: INodeParams
+
+ constructor() {
+ this.label = 'Momento Cache'
+ this.name = 'momentoCache'
+ this.version = 1.0
+ this.type = 'MomentoCache'
+ this.icon = 'momento.png'
+ this.category = 'Cache'
+ this.baseClasses = [this.type, ...getBaseClasses(LangchainMomentoCache)]
+ this.credential = {
+ label: 'Connect Credential',
+ name: 'credential',
+ type: 'credential',
+ optional: true,
+ credentialNames: ['momentoCacheApi']
+ }
+ this.inputs = []
+ }
+
+ async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+ const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+ const apiKey = getCredentialParam('momentoApiKey', credentialData, nodeData)
+ const cacheName = getCredentialParam('momentoCache', credentialData, nodeData)
+
+ // See https://github.com/momentohq/client-sdk-javascript for connection options
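+ // Configurations.Laptop.v1() is a development-oriented client preset; defaultTtlSeconds
+ // controls how long cached entries persist in Momento (one day here)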
+ const client = new CacheClient({
+ configuration: Configurations.Laptop.v1(),
+ credentialProvider: CredentialProvider.fromString({
+ apiKey: apiKey
+ }),
+ defaultTtlSeconds: 60 * 60 * 24
+ })
+
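+ // fromProps ensures the named cache exists before returning the LangChain wrapper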
+ let momentoCache = await LangchainMomentoCache.fromProps({
+ client,
+ cacheName: cacheName
+ })
+ return momentoCache
+ }
+}
+
+module.exports = { nodeClass: MomentoCache }
diff --git a/packages/components/nodes/cache/MomentoCache/momento.png b/packages/components/nodes/cache/MomentoCache/momento.png
new file mode 100644
index 000000000..0f2b54b6a
Binary files /dev/null and b/packages/components/nodes/cache/MomentoCache/momento.png differ
diff --git a/packages/components/nodes/cache/RedisCache/RedisCache.ts b/packages/components/nodes/cache/RedisCache/RedisCache.ts
new file mode 100644
index 000000000..c1b08be69
--- /dev/null
+++ b/packages/components/nodes/cache/RedisCache/RedisCache.ts
@@ -0,0 +1,52 @@
+import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
+import { RedisCache as LangchainRedisCache } from 'langchain/cache/ioredis'
+import { Redis } from 'ioredis'
+
+class RedisCache implements INode {
+ label: string
+ name: string
+ version: number
+ description: string
+ type: string
+ icon: string
+ category: string
+ baseClasses: string[]
+ inputs: INodeParams[]
+ credential: INodeParams
+
+ constructor() {
+ this.label = 'Redis Cache'
+ this.name = 'redisCache'
+ this.version = 1.0
+ this.type = 'RedisCache'
+ this.icon = 'redis.svg'
+ this.category = 'Cache'
+ this.baseClasses = [this.type, ...getBaseClasses(LangchainRedisCache)]
+ this.credential = {
+ label: 'Connect Credential',
+ name: 'credential',
+ type: 'credential',
+ optional: true,
+ credentialNames: ['redisCacheApi']
+ }
+ this.inputs = []
+ }
+
+ async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+ const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+ const username = getCredentialParam('redisCacheUser', credentialData, nodeData)
+ const password = getCredentialParam('redisCachePwd', credentialData, nodeData)
+ const portStr = getCredentialParam('redisCachePort', credentialData, nodeData)
+ const host = getCredentialParam('redisCacheHost', credentialData, nodeData)
+
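+ // ioredis treats an undefined username/password as an unauthenticated connection;
+ // the port falls back to the standard Redis port 6379 when not configured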
+ const client = new Redis({
+ port: portStr ? parseInt(portStr) : 6379,
+ host,
+ username,
+ password
+ })
+ return new LangchainRedisCache(client)
+ }
+}
+
+module.exports = { nodeClass: RedisCache }
diff --git a/packages/components/nodes/cache/RedisCache/redis.svg b/packages/components/nodes/cache/RedisCache/redis.svg
new file mode 100644
index 000000000..903590697
--- /dev/null
+++ b/packages/components/nodes/cache/RedisCache/redis.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/packages/components/nodes/cache/UpstashRedisCache/UpstashRedisCache.ts b/packages/components/nodes/cache/UpstashRedisCache/UpstashRedisCache.ts
new file mode 100644
index 000000000..eb5a9e2f6
--- /dev/null
+++ b/packages/components/nodes/cache/UpstashRedisCache/UpstashRedisCache.ts
@@ -0,0 +1,49 @@
+import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
+import { UpstashRedisCache as LangchainUpstashRedisCache } from 'langchain/cache/upstash_redis'
+
+class UpstashRedisCache implements INode {
+ label: string
+ name: string
+ version: number
+ description: string
+ type: string
+ icon: string
+ category: string
+ baseClasses: string[]
+ inputs: INodeParams[]
+ credential: INodeParams
+
+ constructor() {
+ this.label = 'Upstash Redis Cache'
+ this.name = 'upstashRedisCache'
+ this.version = 1.0
+ this.type = 'UpstashRedisCache'
+ this.icon = 'upstash.png'
+ this.category = 'Cache'
+ this.baseClasses = [this.type, ...getBaseClasses(LangchainUpstashRedisCache)]
+ this.credential = {
+ label: 'Connect Credential',
+ name: 'credential',
+ type: 'credential',
+ optional: true,
+ credentialNames: ['upstashRedisApi']
+ }
+ this.inputs = []
+ }
+
+ async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+ const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+ const upstashConnectionUrl = getCredentialParam('upstashConnectionUrl', credentialData, nodeData)
+ const upstashToken = getCredentialParam('upstashConnectionToken', credentialData, nodeData)
+
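+ // Upstash is accessed over its REST API, so only the REST URL and token are
+ // required; no persistent TCP connection or ioredis client is needed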
+ const cache = new LangchainUpstashRedisCache({
+ config: {
+ url: upstashConnectionUrl,
+ token: upstashToken
+ }
+ })
+ return cache
+ }
+}
+
+module.exports = { nodeClass: UpstashRedisCache }
diff --git a/packages/components/nodes/cache/UpstashRedisCache/upstash.png b/packages/components/nodes/cache/UpstashRedisCache/upstash.png
new file mode 100644
index 000000000..e27e02f4a
Binary files /dev/null and b/packages/components/nodes/cache/UpstashRedisCache/upstash.png differ
diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts
index b26603e29..1cd15c9a5 100644
--- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts
+++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts
@@ -106,8 +106,12 @@ class ConversationChain_Chains implements INode {
const memory = nodeData.inputs?.memory as BufferMemory
if (options && options.chatHistory) {
- memory.chatHistory = mapChatHistory(options)
- chain.memory = memory
+ const chatHistoryClassName = memory.chatHistory.constructor.name
+ // Only replace when the chat history is the in-memory ChatMessageHistory; external memory stores manage their own history
+ if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
+ memory.chatHistory = mapChatHistory(options)
+ chain.memory = memory
+ }
}
const loggerHandler = new ConsoleCallbackHandler(options.logger)
diff --git a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
index 1b4675bda..9a8c1b188 100644
--- a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
+++ b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
@@ -179,7 +179,11 @@ class ConversationalRetrievalQAChain_Chains implements INode {
const obj = { question: input }
if (options && options.chatHistory && chain.memory) {
- ;(chain.memory as any).chatHistory = mapChatHistory(options)
+ const chatHistoryClassName = (chain.memory as any).chatHistory.constructor.name
+ // Only replace when the chat history is the in-memory ChatMessageHistory; external memory stores manage their own history
+ if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
+ ;(chain.memory as any).chatHistory = mapChatHistory(options)
+ }
}
const loggerHandler = new ConsoleCallbackHandler(options.logger)
diff --git a/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts b/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts
index 5d270fd8e..ac33fa0ea 100644
--- a/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts
+++ b/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts
@@ -1,5 +1,5 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { SqlDatabaseChain, SqlDatabaseChainInput } from 'langchain/chains/sql_db'
+import { SqlDatabaseChain, SqlDatabaseChainInput, DEFAULT_SQL_DATABASE_PROMPT } from 'langchain/chains/sql_db'
import { getBaseClasses, getInputVariables } from '../../../src/utils'
import { DataSource } from 'typeorm'
import { SqlDatabase } from 'langchain/sql_db'
@@ -10,25 +10,6 @@ import { DataSourceOptions } from 'typeorm/data-source'
type DatabaseType = 'sqlite' | 'postgres' | 'mssql' | 'mysql'
-const defaultPrompt = `Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.
-
-Never query for all the columns from a specific table, only ask for a the few relevant columns given the question.
-
-Pay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
-
-Use the following format:
-
-Question: "Question here"
-SQLQuery: "SQL Query to run"
-SQLResult: "Result of the SQLQuery"
-Answer: "Final answer here"
-
-Only use the tables listed below.
-
-{table_info}
-
-Question: {input}`
-
class SqlDatabaseChain_Chains implements INode {
label: string
name: string
@@ -43,7 +24,7 @@ class SqlDatabaseChain_Chains implements INode {
constructor() {
this.label = 'Sql Database Chain'
this.name = 'sqlDatabaseChain'
- this.version = 3.0
+ this.version = 4.0
this.type = 'SqlDatabaseChain'
this.icon = 'sqlchain.svg'
this.category = 'Chains'
@@ -89,7 +70,8 @@ class SqlDatabaseChain_Chains implements INode {
label: 'Include Tables',
name: 'includesTables',
type: 'string',
- description: 'Tables to include for queries.',
+ description: 'Tables to include for queries, separated by comma. Use either Include Tables or Ignore Tables, not both',
+ placeholder: 'table1, table2',
additionalParams: true,
optional: true
},
@@ -97,7 +79,8 @@ class SqlDatabaseChain_Chains implements INode {
label: 'Ignore Tables',
name: 'ignoreTables',
type: 'string',
- description: 'Tables to ignore for queries.',
+ description: 'Tables to ignore for queries, separated by comma. Use either Ignore Tables or Include Tables, not both',
+ placeholder: 'table1, table2',
additionalParams: true,
optional: true
},
@@ -129,7 +112,7 @@ class SqlDatabaseChain_Chains implements INode {
warning:
'Prompt must include 3 input variables: {input}, {dialect}, {table_info}. You can refer to official guide from description above',
rows: 4,
- placeholder: defaultPrompt,
+ placeholder: DEFAULT_SQL_DATABASE_PROMPT.template + DEFAULT_SQL_DATABASE_PROMPT.templateFormat,
additionalParams: true,
optional: true
}
diff --git a/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts b/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts
new file mode 100644
index 000000000..16fbc8dc1
--- /dev/null
+++ b/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts
@@ -0,0 +1,176 @@
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { ChatBedrock } from 'langchain/chat_models/bedrock'
+import { BaseBedrockInput } from 'langchain/dist/util/bedrock'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'
+
+/**
+ * I had to run the following to build the component
+ * and get the icon copied over to the dist directory
+ * Flowise/packages/components > yarn build
+ *
+ * @author Michael Connor
+ */
+class AWSChatBedrock_ChatModels implements INode {
+ label: string
+ name: string
+ version: number
+ type: string
+ icon: string
+ category: string
+ description: string
+ baseClasses: string[]
+ credential: INodeParams
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'AWS Bedrock'
+ this.name = 'awsChatBedrock'
+ this.version = 2.0
+ this.type = 'AWSChatBedrock'
+ this.icon = 'awsBedrock.png'
+ this.category = 'Chat Models'
+ this.description = 'Wrapper around AWS Bedrock large language models'
+ this.baseClasses = [this.type, ...getBaseClasses(ChatBedrock)]
+ this.credential = {
+ label: 'AWS Credential',
+ name: 'credential',
+ type: 'credential',
+ credentialNames: ['awsApi'],
+ optional: true
+ }
+ this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
+ {
+ label: 'Region',
+ name: 'region',
+ type: 'options',
+ options: [
+ { label: 'af-south-1', name: 'af-south-1' },
+ { label: 'ap-east-1', name: 'ap-east-1' },
+ { label: 'ap-northeast-1', name: 'ap-northeast-1' },
+ { label: 'ap-northeast-2', name: 'ap-northeast-2' },
+ { label: 'ap-northeast-3', name: 'ap-northeast-3' },
+ { label: 'ap-south-1', name: 'ap-south-1' },
+ { label: 'ap-south-2', name: 'ap-south-2' },
+ { label: 'ap-southeast-1', name: 'ap-southeast-1' },
+ { label: 'ap-southeast-2', name: 'ap-southeast-2' },
+ { label: 'ap-southeast-3', name: 'ap-southeast-3' },
+ { label: 'ap-southeast-4', name: 'ap-southeast-4' },
+ { label: 'ap-southeast-5', name: 'ap-southeast-5' },
+ { label: 'ap-southeast-6', name: 'ap-southeast-6' },
+ { label: 'ca-central-1', name: 'ca-central-1' },
+ { label: 'ca-west-1', name: 'ca-west-1' },
+ { label: 'cn-north-1', name: 'cn-north-1' },
+ { label: 'cn-northwest-1', name: 'cn-northwest-1' },
+ { label: 'eu-central-1', name: 'eu-central-1' },
+ { label: 'eu-central-2', name: 'eu-central-2' },
+ { label: 'eu-north-1', name: 'eu-north-1' },
+ { label: 'eu-south-1', name: 'eu-south-1' },
+ { label: 'eu-south-2', name: 'eu-south-2' },
+ { label: 'eu-west-1', name: 'eu-west-1' },
+ { label: 'eu-west-2', name: 'eu-west-2' },
+ { label: 'eu-west-3', name: 'eu-west-3' },
+ { label: 'il-central-1', name: 'il-central-1' },
+ { label: 'me-central-1', name: 'me-central-1' },
+ { label: 'me-south-1', name: 'me-south-1' },
+ { label: 'sa-east-1', name: 'sa-east-1' },
+ { label: 'us-east-1', name: 'us-east-1' },
+ { label: 'us-east-2', name: 'us-east-2' },
+ { label: 'us-gov-east-1', name: 'us-gov-east-1' },
+ { label: 'us-gov-west-1', name: 'us-gov-west-1' },
+ { label: 'us-west-1', name: 'us-west-1' },
+ { label: 'us-west-2', name: 'us-west-2' }
+ ],
+ default: 'us-east-1',
+ optional: false
+ },
+ {
+ label: 'Model Name',
+ name: 'model',
+ type: 'options',
+ options: [
+ { label: 'amazon.titan-tg1-large', name: 'amazon.titan-tg1-large' },
+ { label: 'amazon.titan-e1t-medium', name: 'amazon.titan-e1t-medium' },
+ { label: 'stability.stable-diffusion-xl', name: 'stability.stable-diffusion-xl' },
+ { label: 'ai21.j2-grande-instruct', name: 'ai21.j2-grande-instruct' },
+ { label: 'ai21.j2-jumbo-instruct', name: 'ai21.j2-jumbo-instruct' },
+ { label: 'ai21.j2-mid', name: 'ai21.j2-mid' },
+ { label: 'ai21.j2-ultra', name: 'ai21.j2-ultra' },
+ { label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
+ { label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
+ { label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' }
+ ],
+ default: 'anthropic.claude-v2',
+ optional: false
+ },
+ {
+ label: 'Temperature',
+ name: 'temperature',
+ type: 'number',
+ step: 0.1,
+ description: 'Temperature parameter may not apply to certain models. Please check the available model parameters',
+ optional: true,
+ default: 0.7,
+ additionalParams: false
+ },
+ {
+ label: 'Max Tokens to Sample',
+ name: 'max_tokens_to_sample',
+ type: 'number',
+ step: 10,
+ description: 'Max Tokens parameter may not apply to certain models. Please check the available model parameters',
+ optional: false,
+ default: 200,
+ additionalParams: false
+ }
+ ]
+ }
+
+ async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+ const iRegion = nodeData.inputs?.region as string
+ const iModel = nodeData.inputs?.model as string
+ const iTemperature = nodeData.inputs?.temperature as string
+ const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
+ const cache = nodeData.inputs?.cache as BaseCache
+
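+ // Node inputs arrive as strings from the UI; convert them to the numeric types
+ // expected by the Bedrock client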
+ const obj: BaseBedrockInput & BaseLLMParams = {
+ region: iRegion,
+ model: iModel,
+ maxTokens: parseInt(iMax_tokens_to_sample, 10),
+ temperature: parseFloat(iTemperature)
+ }
+
+ /**
+ * Long-term credentials specified in LLM configuration are optional.
+ * Bedrock's credential provider falls back to the AWS SDK to fetch
+ * credentials from the running environment.
+ * When specified, we override the default provider with configured values.
+ * @see https://github.com/aws/aws-sdk-js-v3/blob/main/packages/credential-provider-node/README.md
+ */
+ const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+ if (credentialData && Object.keys(credentialData).length !== 0) {
+ const credentialApiKey = getCredentialParam('awsKey', credentialData, nodeData)
+ const credentialApiSecret = getCredentialParam('awsSecret', credentialData, nodeData)
+ const credentialApiSession = getCredentialParam('awsSession', credentialData, nodeData)
+
+ obj.credentials = {
+ accessKeyId: credentialApiKey,
+ secretAccessKey: credentialApiSecret,
+ sessionToken: credentialApiSession
+ }
+ }
+ if (cache) obj.cache = cache
+
+ const amazonBedrock = new ChatBedrock(obj)
+ return amazonBedrock
+ }
+}
+
+module.exports = { nodeClass: AWSChatBedrock_ChatModels }
diff --git a/packages/components/nodes/chatmodels/AWSBedrock/awsBedrock.png b/packages/components/nodes/chatmodels/AWSBedrock/awsBedrock.png
new file mode 100644
index 000000000..483bc69a9
Binary files /dev/null and b/packages/components/nodes/chatmodels/AWSBedrock/awsBedrock.png differ
diff --git a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts
index 90f430f04..99e151e6c 100644
--- a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts
+++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts
@@ -2,6 +2,8 @@ import { OpenAIBaseInput } from 'langchain/dist/types/openai-types'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { AzureOpenAIInput, ChatOpenAI } from 'langchain/chat_models/openai'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'
class AzureChatOpenAI_ChatModels implements INode {
label: string
@@ -18,7 +20,7 @@ class AzureChatOpenAI_ChatModels implements INode {
constructor() {
this.label = 'Azure ChatOpenAI'
this.name = 'azureChatOpenAI'
- this.version = 1.0
+ this.version = 2.0
this.type = 'AzureChatOpenAI'
this.icon = 'Azure.svg'
this.category = 'Chat Models'
@@ -31,6 +33,12 @@ class AzureChatOpenAI_ChatModels implements INode {
credentialNames: ['azureOpenAIApi']
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model Name',
name: 'modelName',
@@ -107,6 +115,7 @@ class AzureChatOpenAI_ChatModels implements INode {
const presencePenalty = nodeData.inputs?.presencePenalty as string
const timeout = nodeData.inputs?.timeout as string
const streaming = nodeData.inputs?.streaming as boolean
+ const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
@@ -114,7 +123,7 @@ class AzureChatOpenAI_ChatModels implements INode {
const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)
- const obj: Partial<AzureOpenAIInput> & Partial<OpenAIBaseInput> = {
+ const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIBaseInput> = {
temperature: parseFloat(temperature),
modelName,
azureOpenAIApiKey,
@@ -128,6 +137,7 @@ class AzureChatOpenAI_ChatModels implements INode {
if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
if (timeout) obj.timeout = parseInt(timeout, 10)
+ if (cache) obj.cache = cache
const model = new ChatOpenAI(obj)
return model
diff --git a/packages/components/nodes/chatmodels/Bittensor/Bittensor.ts b/packages/components/nodes/chatmodels/Bittensor/Bittensor.ts
new file mode 100644
index 000000000..36b084e64
--- /dev/null
+++ b/packages/components/nodes/chatmodels/Bittensor/Bittensor.ts
@@ -0,0 +1,57 @@
+import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses } from '../../../src/utils'
+import { NIBittensorChatModel, BittensorInput } from 'langchain/experimental/chat_models/bittensor'
+import { BaseCache } from 'langchain/schema'
+
+class Bittensor_ChatModels implements INode {
+ label: string
+ name: string
+ version: number
+ type: string
+ icon: string
+ category: string
+ description: string
+ baseClasses: string[]
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'NIBittensorChat'
+ this.name = 'NIBittensorChatModel'
+ this.version = 2.0
+ this.type = 'BittensorChat'
+ this.icon = 'logo.png'
+ this.category = 'Chat Models'
+ this.description = 'Wrapper around Bittensor subnet 1 large language models'
+ this.baseClasses = [this.type, ...getBaseClasses(NIBittensorChatModel)]
+ this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
+ {
+ label: 'System prompt',
+ name: 'system_prompt',
+ type: 'string',
+ additionalParams: true,
+ optional: true
+ }
+ ]
+ }
+
+ async init(nodeData: INodeData, _: string): Promise<any> {
+ const system_prompt = nodeData.inputs?.system_prompt as string
+ const cache = nodeData.inputs?.cache as BaseCache
+
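+ // NIBittensorChatModel only exposes the system prompt here; the optional cache
+ // is attached below, as in the other chat model nodes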
+ const obj: Partial<BittensorInput> = {
+ systemPrompt: system_prompt
+ }
+ if (cache) obj.cache = cache
+
+ const model = new NIBittensorChatModel(obj)
+ return model
+ }
+}
+
+module.exports = { nodeClass: Bittensor_ChatModels }
diff --git a/packages/components/nodes/chatmodels/Bittensor/logo.png b/packages/components/nodes/chatmodels/Bittensor/logo.png
new file mode 100644
index 000000000..ad51774d5
Binary files /dev/null and b/packages/components/nodes/chatmodels/Bittensor/logo.png differ
diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts
index 12a33d994..f16968b67 100644
--- a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts
+++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts
@@ -1,6 +1,8 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { AnthropicInput, ChatAnthropic } from 'langchain/chat_models/anthropic'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'
class ChatAnthropic_ChatModels implements INode {
label: string
@@ -17,7 +19,7 @@ class ChatAnthropic_ChatModels implements INode {
constructor() {
this.label = 'ChatAnthropic'
this.name = 'chatAnthropic'
- this.version = 1.0
+ this.version = 2.0
this.type = 'ChatAnthropic'
this.icon = 'chatAnthropic.png'
this.category = 'Chat Models'
@@ -30,6 +32,12 @@ class ChatAnthropic_ChatModels implements INode {
credentialNames: ['anthropicApi']
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model Name',
name: 'modelName',
@@ -135,11 +143,12 @@ class ChatAnthropic_ChatModels implements INode {
const topP = nodeData.inputs?.topP as string
const topK = nodeData.inputs?.topK as string
const streaming = nodeData.inputs?.streaming as boolean
+ const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData)
- const obj: Partial<AnthropicInput> & { anthropicApiKey?: string } = {
+ const obj: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string } = {
temperature: parseFloat(temperature),
modelName,
anthropicApiKey,
@@ -149,6 +158,7 @@ class ChatAnthropic_ChatModels implements INode {
if (maxTokensToSample) obj.maxTokensToSample = parseInt(maxTokensToSample, 10)
if (topP) obj.topP = parseFloat(topP)
if (topK) obj.topK = parseFloat(topK)
+ if (cache) obj.cache = cache
const model = new ChatAnthropic(obj)
return model
diff --git a/packages/components/nodes/chatmodels/ChatGooglePaLM/ChatGooglePaLM.ts b/packages/components/nodes/chatmodels/ChatGooglePaLM/ChatGooglePaLM.ts
index 642ddb5e3..121406c76 100644
--- a/packages/components/nodes/chatmodels/ChatGooglePaLM/ChatGooglePaLM.ts
+++ b/packages/components/nodes/chatmodels/ChatGooglePaLM/ChatGooglePaLM.ts
@@ -1,6 +1,7 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatGooglePaLM, GooglePaLMChatInput } from 'langchain/chat_models/googlepalm'
+import { BaseCache } from 'langchain/schema'
class ChatGooglePaLM_ChatModels implements INode {
label: string
@@ -17,7 +18,7 @@ class ChatGooglePaLM_ChatModels implements INode {
constructor() {
this.label = 'ChatGooglePaLM'
this.name = 'chatGooglePaLM'
- this.version = 1.0
+ this.version = 2.0
this.type = 'ChatGooglePaLM'
this.icon = 'Google_PaLM_Logo.svg'
this.category = 'Chat Models'
@@ -30,6 +31,12 @@ class ChatGooglePaLM_ChatModels implements INode {
credentialNames: ['googleMakerSuite']
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model Name',
name: 'modelName',
@@ -96,6 +103,7 @@ class ChatGooglePaLM_ChatModels implements INode {
const temperature = nodeData.inputs?.temperature as string
const topP = nodeData.inputs?.topP as string
const topK = nodeData.inputs?.topK as string
+ const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const googleMakerSuiteKey = getCredentialParam('googleMakerSuiteKey', credentialData, nodeData)
@@ -108,6 +116,7 @@ class ChatGooglePaLM_ChatModels implements INode {
if (topP) obj.topP = parseFloat(topP)
if (topK) obj.topK = parseFloat(topK)
+ if (cache) obj.cache = cache
const model = new ChatGooglePaLM(obj)
return model
diff --git a/packages/components/nodes/chatmodels/ChatGoogleVertexAI/ChatGoogleVertexAI.ts b/packages/components/nodes/chatmodels/ChatGoogleVertexAI/ChatGoogleVertexAI.ts
index 4cb206f53..6b070bd93 100644
--- a/packages/components/nodes/chatmodels/ChatGoogleVertexAI/ChatGoogleVertexAI.ts
+++ b/packages/components/nodes/chatmodels/ChatGoogleVertexAI/ChatGoogleVertexAI.ts
@@ -2,6 +2,7 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Inter
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatGoogleVertexAI, GoogleVertexAIChatInput } from 'langchain/chat_models/googlevertexai'
import { GoogleAuthOptions } from 'google-auth-library'
+import { BaseCache } from 'langchain/schema'
class GoogleVertexAI_ChatModels implements INode {
label: string
@@ -18,7 +19,7 @@ class GoogleVertexAI_ChatModels implements INode {
constructor() {
this.label = 'ChatGoogleVertexAI'
this.name = 'chatGoogleVertexAI'
- this.version = 1.0
+ this.version = 2.0
this.type = 'ChatGoogleVertexAI'
this.icon = 'vertexai.svg'
this.category = 'Chat Models'
@@ -34,6 +35,12 @@ class GoogleVertexAI_ChatModels implements INode {
'Google Vertex AI credential. If you are using a GCP service like Cloud Run, or if you have installed default credentials on your local machine, you do not need to set this credential.'
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model Name',
name: 'modelName',
@@ -113,6 +120,7 @@ class GoogleVertexAI_ChatModels implements INode {
const modelName = nodeData.inputs?.modelName as string
const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
const topP = nodeData.inputs?.topP as string
+ const cache = nodeData.inputs?.cache as BaseCache
const obj: GoogleVertexAIChatInput = {
temperature: parseFloat(temperature),
@@ -122,6 +130,7 @@ class GoogleVertexAI_ChatModels implements INode {
if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
if (topP) obj.topP = parseFloat(topP)
+ if (cache) obj.cache = cache
const model = new ChatGoogleVertexAI(obj)
return model
diff --git a/packages/components/nodes/chatmodels/ChatHuggingFace/ChatHuggingFace.ts b/packages/components/nodes/chatmodels/ChatHuggingFace/ChatHuggingFace.ts
index ee55c7bb9..153c5d100 100644
--- a/packages/components/nodes/chatmodels/ChatHuggingFace/ChatHuggingFace.ts
+++ b/packages/components/nodes/chatmodels/ChatHuggingFace/ChatHuggingFace.ts
@@ -1,6 +1,7 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { HFInput, HuggingFaceInference } from './core'
+import { BaseCache } from 'langchain/schema'
class ChatHuggingFace_ChatModels implements INode {
label: string
@@ -17,7 +18,7 @@ class ChatHuggingFace_ChatModels implements INode {
constructor() {
this.label = 'ChatHuggingFace'
this.name = 'chatHuggingFace'
- this.version = 1.0
+ this.version = 2.0
this.type = 'ChatHuggingFace'
this.icon = 'huggingface.png'
this.category = 'Chat Models'
@@ -30,6 +31,12 @@ class ChatHuggingFace_ChatModels implements INode {
credentialNames: ['huggingFaceApi']
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model',
name: 'model',
@@ -102,6 +109,7 @@ class ChatHuggingFace_ChatModels implements INode {
const hfTopK = nodeData.inputs?.hfTopK as string
const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
const endpoint = nodeData.inputs?.endpoint as string
+ const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const huggingFaceApiKey = getCredentialParam('huggingFaceApiKey', credentialData, nodeData)
@@ -119,6 +127,7 @@ class ChatHuggingFace_ChatModels implements INode {
if (endpoint) obj.endpoint = endpoint
const huggingFace = new HuggingFaceInference(obj)
+ if (cache) huggingFace.cache = cache
return huggingFace
}
}
diff --git a/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts b/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts
index a6ddfae42..18ed409bf 100644
--- a/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts
+++ b/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts
@@ -2,6 +2,8 @@ import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { OpenAIChat } from 'langchain/llms/openai'
import { OpenAIChatInput } from 'langchain/chat_models/openai'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'
class ChatLocalAI_ChatModels implements INode {
label: string
@@ -17,13 +19,19 @@ class ChatLocalAI_ChatModels implements INode {
constructor() {
this.label = 'ChatLocalAI'
this.name = 'chatLocalAI'
- this.version = 1.0
+ this.version = 2.0
this.type = 'ChatLocalAI'
this.icon = 'localai.png'
this.category = 'Chat Models'
this.description = 'Use local LLMs like llama.cpp, gpt4all using LocalAI'
this.baseClasses = [this.type, 'BaseChatModel', ...getBaseClasses(OpenAIChat)]
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Base Path',
name: 'basePath',
@@ -78,8 +86,9 @@ class ChatLocalAI_ChatModels implements INode {
const topP = nodeData.inputs?.topP as string
const timeout = nodeData.inputs?.timeout as string
const basePath = nodeData.inputs?.basePath as string
+ const cache = nodeData.inputs?.cache as BaseCache
- const obj: Partial<OpenAIChatInput> & { openAIApiKey?: string } = {
+ const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey: 'sk-'
@@ -88,6 +97,7 @@ class ChatLocalAI_ChatModels implements INode {
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (timeout) obj.timeout = parseInt(timeout, 10)
+ if (cache) obj.cache = cache
const model = new OpenAIChat(obj, { basePath })
diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts
index ca081ff43..f74bd642f 100644
--- a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts
+++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts
@@ -1,6 +1,8 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'
class ChatOpenAI_ChatModels implements INode {
label: string
@@ -17,7 +19,7 @@ class ChatOpenAI_ChatModels implements INode {
constructor() {
this.label = 'ChatOpenAI'
this.name = 'chatOpenAI'
- this.version = 1.0
+ this.version = 2.0
this.type = 'ChatOpenAI'
this.icon = 'openai.png'
this.category = 'Chat Models'
@@ -30,6 +32,12 @@ class ChatOpenAI_ChatModels implements INode {
credentialNames: ['openAIApi']
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
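+ // When a BaseCache node is connected, repeated identical prompts are served
+ // from the cache instead of re-calling the model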
{
label: 'Model Name',
name: 'modelName',
@@ -151,7 +159,9 @@ class ChatOpenAI_ChatModels implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
- const obj: Partial<OpenAIChatInput> & { openAIApiKey?: string } = {
+ const cache = nodeData.inputs?.cache as BaseCache
+
+ const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,
@@ -163,6 +173,7 @@ class ChatOpenAI_ChatModels implements INode {
if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
if (timeout) obj.timeout = parseInt(timeout, 10)
+ if (cache) obj.cache = cache
let parsedBaseOptions: any | undefined = undefined
diff --git a/packages/components/nodes/chatmodels/ChatOpenAICustom/ChatOpenAICustom.ts b/packages/components/nodes/chatmodels/ChatOpenAICustom/ChatOpenAICustom.ts
index 29c1181a6..2a01a2e54 100644
--- a/packages/components/nodes/chatmodels/ChatOpenAICustom/ChatOpenAICustom.ts
+++ b/packages/components/nodes/chatmodels/ChatOpenAICustom/ChatOpenAICustom.ts
@@ -1,6 +1,8 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'
class ChatOpenAICustom_ChatModels implements INode {
label: string
@@ -17,7 +19,7 @@ class ChatOpenAICustom_ChatModels implements INode {
constructor() {
this.label = 'ChatOpenAI Custom'
this.name = 'chatOpenAICustom'
- this.version = 1.0
+ this.version = 2.0
this.type = 'ChatOpenAI-Custom'
this.icon = 'openai.png'
this.category = 'Chat Models'
@@ -31,6 +33,12 @@ class ChatOpenAICustom_ChatModels implements INode {
optional: true
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model Name',
name: 'modelName',
@@ -113,11 +121,12 @@ class ChatOpenAICustom_ChatModels implements INode {
const streaming = nodeData.inputs?.streaming as boolean
const basePath = nodeData.inputs?.basepath as string
const baseOptions = nodeData.inputs?.baseOptions
+ const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
- const obj: Partial<OpenAIChatInput> & { openAIApiKey?: string } = {
+ const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,
@@ -129,6 +138,7 @@ class ChatOpenAICustom_ChatModels implements INode {
if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
if (timeout) obj.timeout = parseInt(timeout, 10)
+ if (cache) obj.cache = cache
let parsedBaseOptions: any | undefined = undefined
diff --git a/packages/components/nodes/documentloaders/PlainText/PlainText.ts b/packages/components/nodes/documentloaders/PlainText/PlainText.ts
new file mode 100644
index 000000000..c2adceeb6
--- /dev/null
+++ b/packages/components/nodes/documentloaders/PlainText/PlainText.ts
@@ -0,0 +1,112 @@
+import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
+import { TextSplitter } from 'langchain/text_splitter'
+import { Document } from 'langchain/document'
+import { handleEscapeCharacters } from '../../../src'
+
+class PlainText_DocumentLoaders implements INode {
+ label: string
+ name: string
+ version: number
+ description: string
+ type: string
+ icon: string
+ category: string
+ baseClasses: string[]
+ inputs: INodeParams[]
+ outputs: INodeOutputsValue[]
+
+ constructor() {
+ this.label = 'Plain Text'
+ this.name = 'plainText'
+ this.version = 2.0
+ this.type = 'Document'
+ this.icon = 'plaintext.svg'
+ this.category = 'Document Loaders'
+ this.description = `Load data from plain text`
+ this.baseClasses = [this.type]
+ this.inputs = [
+ {
+ label: 'Text',
+ name: 'text',
+ type: 'string',
+ rows: 4,
+ placeholder:
+ 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua...'
+ },
+ {
+ label: 'Text Splitter',
+ name: 'textSplitter',
+ type: 'TextSplitter',
+ optional: true
+ },
+ {
+ label: 'Metadata',
+ name: 'metadata',
+ type: 'json',
+ optional: true,
+ additionalParams: true
+ }
+ ]
+ this.outputs = [
+ {
+ label: 'Document',
+ name: 'document',
+ baseClasses: this.baseClasses
+ },
+ {
+ label: 'Text',
+ name: 'text',
+ baseClasses: ['string', 'json']
+ }
+ ]
+ }
+
+ async init(nodeData: INodeData): Promise<any> {
+ const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
+ const text = nodeData.inputs?.text as string
+ const metadata = nodeData.inputs?.metadata
+ const output = nodeData.outputs?.output as string
+
+ let alldocs: Document<Record<string, any>>[] = []
+
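+ // With a text splitter connected, chunk the raw text into multiple documents;
+ // otherwise wrap the whole input in a single Document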
+ if (textSplitter) {
+ const docs = await textSplitter.createDocuments([text])
+ alldocs.push(...docs)
+ } else {
+ alldocs.push(
+ new Document({
+ pageContent: text
+ })
+ )
+ }
+
+ let finaldocs: Document<Record<string, any>>[] = []
+ if (metadata) {
+ const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)
+ for (const doc of alldocs) {
+ const newdoc = {
+ ...doc,
+ metadata: {
+ ...doc.metadata,
+ ...parsedMetadata
+ }
+ }
+ finaldocs.push(newdoc)
+ }
+ } else {
+ finaldocs = alldocs
+ }
+
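+ // The 'document' output returns Document objects (e.g. for vector stores); the
+ // 'text' output concatenates page contents into a single string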
+ if (output === 'document') {
+ return finaldocs
+ } else {
+ let finaltext = ''
+ for (const doc of finaldocs) {
+ finaltext += `${doc.pageContent}\n`
+ }
+ return handleEscapeCharacters(finaltext, false)
+ }
+ }
+}
+
+module.exports = { nodeClass: PlainText_DocumentLoaders }
diff --git a/packages/components/nodes/documentloaders/PlainText/plaintext.svg b/packages/components/nodes/documentloaders/PlainText/plaintext.svg
new file mode 100644
index 000000000..b9fec0355
--- /dev/null
+++ b/packages/components/nodes/documentloaders/PlainText/plaintext.svg
@@ -0,0 +1,7 @@
+
\ No newline at end of file
diff --git a/packages/components/nodes/documentloaders/Text/Text.ts b/packages/components/nodes/documentloaders/Text/Text.ts
index dacf087c9..c3e3b61e8 100644
--- a/packages/components/nodes/documentloaders/Text/Text.ts
+++ b/packages/components/nodes/documentloaders/Text/Text.ts
@@ -1,6 +1,8 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { TextLoader } from 'langchain/document_loaders/fs/text'
+import { Document } from 'langchain/document'
+import { handleEscapeCharacters } from '../../../src'
class Text_DocumentLoaders implements INode {
label: string
@@ -12,11 +14,12 @@ class Text_DocumentLoaders implements INode {
category: string
baseClasses: string[]
inputs: INodeParams[]
+ outputs: INodeOutputsValue[]
constructor() {
this.label = 'Text File'
this.name = 'textFile'
- this.version = 1.0
+ this.version = 2.0
this.type = 'Document'
this.icon = 'textFile.svg'
this.category = 'Document Loaders'
@@ -43,12 +46,25 @@ class Text_DocumentLoaders implements INode {
additionalParams: true
}
]
+ this.outputs = [
+ {
+ label: 'Document',
+ name: 'document',
+ baseClasses: this.baseClasses
+ },
+ {
+ label: 'Text',
+ name: 'text',
+ baseClasses: ['string', 'json']
+ }
+ ]
}
async init(nodeData: INodeData): Promise<any> {
const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
const txtFileBase64 = nodeData.inputs?.txtFile as string
const metadata = nodeData.inputs?.metadata
+ const output = nodeData.outputs?.output as string
let alldocs = []
let files: string[] = []
@@ -75,9 +91,9 @@ class Text_DocumentLoaders implements INode {
}
}
+ let finaldocs: Document<Record<string, any>>[] = []
if (metadata) {
const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)
- let finaldocs = []
for (const doc of alldocs) {
const newdoc = {
...doc,
@@ -88,9 +104,19 @@ class Text_DocumentLoaders implements INode {
}
finaldocs.push(newdoc)
}
- return finaldocs
+ } else {
+ finaldocs = alldocs
+ }
+
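+ // As in the Plain Text loader, the 'document' output returns Document objects
+ // while the 'text' output concatenates page contents into a single string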
+ if (output === 'document') {
+ return finaldocs
+ } else {
+ let finaltext = ''
+ for (const doc of finaldocs) {
+ finaltext += `${doc.pageContent}\n`
+ }
+ return handleEscapeCharacters(finaltext, false)
}
- return alldocs
}
}
diff --git a/packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts b/packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts
new file mode 100644
index 000000000..8f57daacd
--- /dev/null
+++ b/packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts
@@ -0,0 +1,175 @@
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { Bedrock } from 'langchain/llms/bedrock'
+import { BaseBedrockInput } from 'langchain/dist/util/bedrock'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'
+
+/**
+ * I had to run the following to build the component
+ * and get the icon copied over to the dist directory
+ * Flowise/packages/components > yarn build
+ *
+ * @author Michael Connor
+ */
+class AWSBedrock_LLMs implements INode {
+ label: string
+ name: string
+ version: number
+ type: string
+ icon: string
+ category: string
+ description: string
+ baseClasses: string[]
+ credential: INodeParams
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'AWS Bedrock'
+ this.name = 'awsBedrock'
+ this.version = 1.2
+ this.type = 'AWSBedrock'
+ this.icon = 'awsBedrock.png'
+ this.category = 'LLMs'
+ this.description = 'Wrapper around AWS Bedrock large language models'
+ this.baseClasses = [this.type, ...getBaseClasses(Bedrock)]
+ this.credential = {
+ label: 'AWS Credential',
+ name: 'credential',
+ type: 'credential',
+ credentialNames: ['awsApi'],
+ optional: true
+ }
+ this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
+ {
+ label: 'Region',
+ name: 'region',
+ type: 'options',
+ options: [
+ { label: 'af-south-1', name: 'af-south-1' },
+ { label: 'ap-east-1', name: 'ap-east-1' },
+ { label: 'ap-northeast-1', name: 'ap-northeast-1' },
+ { label: 'ap-northeast-2', name: 'ap-northeast-2' },
+ { label: 'ap-northeast-3', name: 'ap-northeast-3' },
+ { label: 'ap-south-1', name: 'ap-south-1' },
+ { label: 'ap-south-2', name: 'ap-south-2' },
+ { label: 'ap-southeast-1', name: 'ap-southeast-1' },
+ { label: 'ap-southeast-2', name: 'ap-southeast-2' },
+ { label: 'ap-southeast-3', name: 'ap-southeast-3' },
+ { label: 'ap-southeast-4', name: 'ap-southeast-4' },
+ { label: 'ap-southeast-5', name: 'ap-southeast-5' },
+ { label: 'ap-southeast-6', name: 'ap-southeast-6' },
+ { label: 'ca-central-1', name: 'ca-central-1' },
+ { label: 'ca-west-1', name: 'ca-west-1' },
+ { label: 'cn-north-1', name: 'cn-north-1' },
+ { label: 'cn-northwest-1', name: 'cn-northwest-1' },
+ { label: 'eu-central-1', name: 'eu-central-1' },
+ { label: 'eu-central-2', name: 'eu-central-2' },
+ { label: 'eu-north-1', name: 'eu-north-1' },
+ { label: 'eu-south-1', name: 'eu-south-1' },
+ { label: 'eu-south-2', name: 'eu-south-2' },
+ { label: 'eu-west-1', name: 'eu-west-1' },
+ { label: 'eu-west-2', name: 'eu-west-2' },
+ { label: 'eu-west-3', name: 'eu-west-3' },
+ { label: 'il-central-1', name: 'il-central-1' },
+ { label: 'me-central-1', name: 'me-central-1' },
+ { label: 'me-south-1', name: 'me-south-1' },
+ { label: 'sa-east-1', name: 'sa-east-1' },
+ { label: 'us-east-1', name: 'us-east-1' },
+ { label: 'us-east-2', name: 'us-east-2' },
+ { label: 'us-gov-east-1', name: 'us-gov-east-1' },
+ { label: 'us-gov-west-1', name: 'us-gov-west-1' },
+ { label: 'us-west-1', name: 'us-west-1' },
+ { label: 'us-west-2', name: 'us-west-2' }
+ ],
+ default: 'us-east-1',
+ optional: false
+ },
+ {
+ label: 'Model Name',
+ name: 'model',
+ type: 'options',
+ options: [
+ { label: 'amazon.titan-tg1-large', name: 'amazon.titan-tg1-large' },
+ { label: 'amazon.titan-e1t-medium', name: 'amazon.titan-e1t-medium' },
+ { label: 'stability.stable-diffusion-xl', name: 'stability.stable-diffusion-xl' },
+ { label: 'ai21.j2-grande-instruct', name: 'ai21.j2-grande-instruct' },
+ { label: 'ai21.j2-jumbo-instruct', name: 'ai21.j2-jumbo-instruct' },
+ { label: 'ai21.j2-mid', name: 'ai21.j2-mid' },
+ { label: 'ai21.j2-ultra', name: 'ai21.j2-ultra' },
+ { label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
+ { label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
+ { label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' }
+ ],
+ default: 'anthropic.claude-v2',
+ optional: false
+ },
+ {
+ label: 'Temperature',
+ name: 'temperature',
+ type: 'number',
+ step: 0.1,
+ description: 'Temperature parameter may not apply to certain models. Please check the available model parameters',
+ optional: true,
+ default: 0.7,
+ additionalParams: false
+ },
+ {
+ label: 'Max Tokens to Sample',
+ name: 'max_tokens_to_sample',
+ type: 'number',
+ step: 10,
+ description: 'Max Tokens parameter may not apply to certain models. Please check the available model parameters',
+ optional: false,
+ default: 200,
+ additionalParams: false
+ }
+ ]
+ }
+
+ async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+ const iRegion = nodeData.inputs?.region as string
+ const iModel = nodeData.inputs?.model as string
+ const iTemperature = nodeData.inputs?.temperature as string
+ const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
+ const cache = nodeData.inputs?.cache as BaseCache
+ const obj: Partial<BaseBedrockInput> & BaseLLMParams = {
+ model: iModel,
+ region: iRegion,
+ temperature: parseFloat(iTemperature),
+ maxTokens: parseInt(iMax_tokens_to_sample, 10)
+ }
+
+ /**
+ * Long-term credentials specified in LLM configuration are optional.
+ * Bedrock's credential provider falls back to the AWS SDK to fetch
+ * credentials from the running environment.
+ * When specified, we override the default provider with configured values.
+ * @see https://github.com/aws/aws-sdk-js-v3/blob/main/packages/credential-provider-node/README.md
+ */
+ const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+ if (credentialData && Object.keys(credentialData).length !== 0) {
+ const credentialApiKey = getCredentialParam('awsKey', credentialData, nodeData)
+ const credentialApiSecret = getCredentialParam('awsSecret', credentialData, nodeData)
+ const credentialApiSession = getCredentialParam('awsSession', credentialData, nodeData)
+
+ obj.credentials = {
+ accessKeyId: credentialApiKey,
+ secretAccessKey: credentialApiSecret,
+ sessionToken: credentialApiSession
+ }
+ }
+ if (cache) obj.cache = cache
+
+ const amazonBedrock = new Bedrock(obj)
+ return amazonBedrock
+ }
+}
+
+module.exports = { nodeClass: AWSBedrock_LLMs }
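The credential fallback described in the comment inside `init` can be exercised directly. A minimal sketch of both modes, outside Flowise; the key values are hypothetical placeholders:

```typescript
import { Bedrock } from 'langchain/llms/bedrock'

// Credentials resolved from the runtime environment (env vars, shared
// config files, instance metadata) via the AWS SDK default provider chain:
const implicitCreds = new Bedrock({ model: 'anthropic.claude-v2', region: 'us-east-1' })

// Explicit credentials override the default provider; sessionToken is only
// required for temporary credentials. All values below are placeholders.
const explicitCreds = new Bedrock({
    model: 'anthropic.claude-v2',
    region: 'us-east-1',
    credentials: {
        accessKeyId: 'AKIA...',
        secretAccessKey: '...',
        sessionToken: '...'
    }
})
```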
diff --git a/packages/components/nodes/llms/AWSBedrock/awsBedrock.png b/packages/components/nodes/llms/AWSBedrock/awsBedrock.png
new file mode 100644
index 000000000..483bc69a9
Binary files /dev/null and b/packages/components/nodes/llms/AWSBedrock/awsBedrock.png differ
diff --git a/packages/components/nodes/llms/Azure OpenAI/AzureOpenAI.ts b/packages/components/nodes/llms/Azure OpenAI/AzureOpenAI.ts
index f48c4642b..f50e3d95d 100644
--- a/packages/components/nodes/llms/Azure OpenAI/AzureOpenAI.ts
+++ b/packages/components/nodes/llms/Azure OpenAI/AzureOpenAI.ts
@@ -1,7 +1,8 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { AzureOpenAIInput, OpenAI, OpenAIInput } from 'langchain/llms/openai'
-
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'
class AzureOpenAI_LLMs implements INode {
label: string
name: string
@@ -17,7 +18,7 @@ class AzureOpenAI_LLMs implements INode {
constructor() {
this.label = 'Azure OpenAI'
this.name = 'azureOpenAI'
- this.version = 1.0
+ this.version = 2.0
this.type = 'AzureOpenAI'
this.icon = 'Azure.svg'
this.category = 'LLMs'
@@ -30,6 +31,12 @@ class AzureOpenAI_LLMs implements INode {
credentialNames: ['azureOpenAIApi']
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model Name',
name: 'modelName',
@@ -163,7 +170,9 @@ class AzureOpenAI_LLMs implements INode {
const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)
- const obj: Partial<AzureOpenAIInput> & Partial<OpenAIInput> = {
+ const cache = nodeData.inputs?.cache as BaseCache
+
+ const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIInput> = {
temperature: parseFloat(temperature),
modelName,
azureOpenAIApiKey,
@@ -179,6 +188,7 @@ class AzureOpenAI_LLMs implements INode {
if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
if (timeout) obj.timeout = parseInt(timeout, 10)
if (bestOf) obj.bestOf = parseInt(bestOf, 10)
+ if (cache) obj.cache = cache
const model = new OpenAI(obj)
return model
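Every LLM and chat model node touched in this release gains the same optional Cache input, wired through `if (cache) obj.cache = cache`. The value is any LangChain BaseCache implementation; a minimal sketch with the in-memory cache (assuming the langchain version pinned in this PR) shows the effect:

```typescript
import { OpenAI } from 'langchain/llms/openai'
import { InMemoryCache } from 'langchain/cache'

const model = new OpenAI({ temperature: 0, cache: new InMemoryCache() })

await model.call('Tell me a joke') // first call hits the API
await model.call('Tell me a joke') // identical prompt is answered from the cache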
diff --git a/packages/components/nodes/llms/Bittensor/Bittensor.ts b/packages/components/nodes/llms/Bittensor/Bittensor.ts
new file mode 100644
index 000000000..e6cc2bb61
--- /dev/null
+++ b/packages/components/nodes/llms/Bittensor/Bittensor.ts
@@ -0,0 +1,68 @@
+import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses } from '../../../src/utils'
+import { NIBittensorLLM, BittensorInput } from 'langchain/experimental/llms/bittensor'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'
+
+class Bittensor_LLMs implements INode {
+ label: string
+ name: string
+ version: number
+ type: string
+ icon: string
+ category: string
+ description: string
+ baseClasses: string[]
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'NIBittensorLLM'
+ this.name = 'NIBittensorLLM'
+ this.version = 2.0
+ this.type = 'Bittensor'
+ this.icon = 'logo.png'
+ this.category = 'LLMs'
+ this.description = 'Wrapper around Bittensor subnet 1 large language models'
+ this.baseClasses = [this.type, ...getBaseClasses(NIBittensorLLM)]
+ this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
+ {
+ label: 'System prompt',
+ name: 'system_prompt',
+ type: 'string',
+ additionalParams: true,
+ optional: true
+ },
+ {
+ label: 'Top Responses',
+ name: 'topResponses',
+ type: 'number',
+ step: 1,
+ optional: true,
+ additionalParams: true
+ }
+ ]
+ }
+
+ async init(nodeData: INodeData, _: string): Promise<any> {
+ const system_prompt = nodeData.inputs?.system_prompt as string
+ const topResponses = Number(nodeData.inputs?.topResponses as number)
+ const cache = nodeData.inputs?.cache as BaseCache
+
+ const obj: Partial<BittensorInput> & BaseLLMParams = {
+ systemPrompt: system_prompt,
+ topResponses: topResponses
+ }
+ if (cache) obj.cache = cache
+
+ const model = new NIBittensorLLM(obj)
+ return model
+ }
+}
+
+module.exports = { nodeClass: Bittensor_LLMs }
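For reference, a hedged sketch of the model this node constructs; NIBittensorLLM lives in LangChain's experimental namespace, so its API may change:

```typescript
import { NIBittensorLLM } from 'langchain/experimental/llms/bittensor'

const model = new NIBittensorLLM({
    systemPrompt: 'You are a helpful assistant.',
    topResponses: 5 // assumption: when set, the top N miner responses are returned
})
const answer = await model.call('What is Bittensor?')
```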
diff --git a/packages/components/nodes/llms/Bittensor/logo.png b/packages/components/nodes/llms/Bittensor/logo.png
new file mode 100644
index 000000000..ad51774d5
Binary files /dev/null and b/packages/components/nodes/llms/Bittensor/logo.png differ
diff --git a/packages/components/nodes/llms/Cohere/Cohere.ts b/packages/components/nodes/llms/Cohere/Cohere.ts
index 4a3a8a807..3fde0af00 100644
--- a/packages/components/nodes/llms/Cohere/Cohere.ts
+++ b/packages/components/nodes/llms/Cohere/Cohere.ts
@@ -1,6 +1,7 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Cohere, CohereInput } from './core'
+import { BaseCache } from 'langchain/schema'
class Cohere_LLMs implements INode {
label: string
@@ -17,7 +18,7 @@ class Cohere_LLMs implements INode {
constructor() {
this.label = 'Cohere'
this.name = 'cohere'
- this.version = 1.0
+ this.version = 2.0
this.type = 'Cohere'
this.icon = 'cohere.png'
this.category = 'LLMs'
@@ -30,6 +31,12 @@ class Cohere_LLMs implements INode {
credentialNames: ['cohereApi']
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model Name',
name: 'modelName',
@@ -85,7 +92,7 @@ class Cohere_LLMs implements INode {
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const maxTokens = nodeData.inputs?.maxTokens as string
-
+ const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const cohereApiKey = getCredentialParam('cohereApiKey', credentialData, nodeData)
@@ -96,7 +103,7 @@ class Cohere_LLMs implements INode {
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (modelName) obj.model = modelName
if (temperature) obj.temperature = parseFloat(temperature)
-
+ if (cache) obj.cache = cache
const model = new Cohere(obj)
return model
}
diff --git a/packages/components/nodes/llms/GooglePaLM/GooglePaLM.ts b/packages/components/nodes/llms/GooglePaLM/GooglePaLM.ts
index 246303608..d3212a1cd 100644
--- a/packages/components/nodes/llms/GooglePaLM/GooglePaLM.ts
+++ b/packages/components/nodes/llms/GooglePaLM/GooglePaLM.ts
@@ -1,7 +1,7 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { GooglePaLM, GooglePaLMTextInput } from 'langchain/llms/googlepalm'
-
+import { BaseCache } from 'langchain/schema'
class GooglePaLM_LLMs implements INode {
label: string
name: string
@@ -17,7 +17,7 @@ class GooglePaLM_LLMs implements INode {
constructor() {
this.label = 'GooglePaLM'
this.name = 'GooglePaLM'
- this.version = 1.0
+ this.version = 2.0
this.type = 'GooglePaLM'
this.icon = 'Google_PaLM_Logo.svg'
this.category = 'LLMs'
@@ -30,6 +30,12 @@ class GooglePaLM_LLMs implements INode {
credentialNames: ['googleMakerSuite']
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model Name',
name: 'modelName',
@@ -126,6 +132,7 @@ class GooglePaLM_LLMs implements INode {
const topP = nodeData.inputs?.topP as string
const topK = nodeData.inputs?.topK as string
const stopSequencesObj = nodeData.inputs?.stopSequencesObj
+ const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const googleMakerSuiteKey = getCredentialParam('googleMakerSuiteKey', credentialData, nodeData)
@@ -139,6 +146,7 @@ class GooglePaLM_LLMs implements INode {
if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (topK) obj.topK = parseFloat(topK)
+ if (cache) obj.cache = cache
let parsedStopSequences: any | undefined = undefined
if (stopSequencesObj) {
diff --git a/packages/components/nodes/llms/GoogleVertexAI/GoogleVertexAI.ts b/packages/components/nodes/llms/GoogleVertexAI/GoogleVertexAI.ts
index 4d19d04f0..6b6d534ba 100644
--- a/packages/components/nodes/llms/GoogleVertexAI/GoogleVertexAI.ts
+++ b/packages/components/nodes/llms/GoogleVertexAI/GoogleVertexAI.ts
@@ -2,6 +2,7 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Inter
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { GoogleVertexAI, GoogleVertexAITextInput } from 'langchain/llms/googlevertexai'
import { GoogleAuthOptions } from 'google-auth-library'
+import { BaseCache } from 'langchain/schema'
class GoogleVertexAI_LLMs implements INode {
label: string
@@ -18,7 +19,7 @@ class GoogleVertexAI_LLMs implements INode {
constructor() {
this.label = 'GoogleVertexAI'
this.name = 'googlevertexai'
- this.version = 1.0
+ this.version = 2.0
this.type = 'GoogleVertexAI'
this.icon = 'vertexai.svg'
this.category = 'LLMs'
@@ -34,6 +35,12 @@ class GoogleVertexAI_LLMs implements INode {
'Google Vertex AI credential. If you are using a GCP service like Cloud Run, or if you have installed default credentials on your local machine, you do not need to set this credential.'
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model Name',
name: 'modelName',
@@ -120,6 +127,7 @@ class GoogleVertexAI_LLMs implements INode {
const modelName = nodeData.inputs?.modelName as string
const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
const topP = nodeData.inputs?.topP as string
+ const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<GoogleVertexAITextInput> = {
temperature: parseFloat(temperature),
@@ -129,6 +137,7 @@ class GoogleVertexAI_LLMs implements INode {
if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
if (topP) obj.topP = parseFloat(topP)
+ if (cache) obj.cache = cache
const model = new GoogleVertexAI(obj)
return model
diff --git a/packages/components/nodes/llms/HuggingFaceInference/HuggingFaceInference.ts b/packages/components/nodes/llms/HuggingFaceInference/HuggingFaceInference.ts
index c7f6a37e8..8dcf021bb 100644
--- a/packages/components/nodes/llms/HuggingFaceInference/HuggingFaceInference.ts
+++ b/packages/components/nodes/llms/HuggingFaceInference/HuggingFaceInference.ts
@@ -1,6 +1,7 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { HFInput, HuggingFaceInference } from './core'
+import { BaseCache } from 'langchain/schema'
class HuggingFaceInference_LLMs implements INode {
label: string
@@ -17,7 +18,7 @@ class HuggingFaceInference_LLMs implements INode {
constructor() {
this.label = 'HuggingFace Inference'
this.name = 'huggingFaceInference_LLMs'
- this.version = 1.0
+ this.version = 2.0
this.type = 'HuggingFaceInference'
this.icon = 'huggingface.png'
this.category = 'LLMs'
@@ -30,6 +31,12 @@ class HuggingFaceInference_LLMs implements INode {
credentialNames: ['huggingFaceApi']
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model',
name: 'model',
@@ -106,6 +113,8 @@ class HuggingFaceInference_LLMs implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const huggingFaceApiKey = getCredentialParam('huggingFaceApiKey', credentialData, nodeData)
+ const cache = nodeData.inputs?.cache as BaseCache
+
const obj: Partial<HFInput> = {
model,
apiKey: huggingFaceApiKey
@@ -119,6 +128,8 @@ class HuggingFaceInference_LLMs implements INode {
if (endpoint) obj.endpoint = endpoint
const huggingFace = new HuggingFaceInference(obj)
+ if (cache) huggingFace.cache = cache
+
return huggingFace
}
}
diff --git a/packages/components/nodes/llms/OpenAI/OpenAI.ts b/packages/components/nodes/llms/OpenAI/OpenAI.ts
index 2960ad2a8..9109dd408 100644
--- a/packages/components/nodes/llms/OpenAI/OpenAI.ts
+++ b/packages/components/nodes/llms/OpenAI/OpenAI.ts
@@ -1,6 +1,8 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAI, OpenAIInput } from 'langchain/llms/openai'
+import { BaseLLMParams } from 'langchain/llms/base'
+import { BaseCache } from 'langchain/schema'
class OpenAI_LLMs implements INode {
label: string
@@ -17,7 +19,7 @@ class OpenAI_LLMs implements INode {
constructor() {
this.label = 'OpenAI'
this.name = 'openAI'
- this.version = 2.0
+ this.version = 3.0
this.type = 'OpenAI'
this.icon = 'openai.png'
this.category = 'LLMs'
@@ -30,6 +32,12 @@ class OpenAI_LLMs implements INode {
credentialNames: ['openAIApi']
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model Name',
name: 'modelName',
@@ -149,7 +157,9 @@ class OpenAI_LLMs implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
- const obj: Partial<OpenAIInput> & { openAIApiKey?: string } = {
+ const cache = nodeData.inputs?.cache as BaseCache
+
+ const obj: Partial<OpenAIInput> & BaseLLMParams & { openAIApiKey?: string } = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,
@@ -164,8 +174,9 @@ class OpenAI_LLMs implements INode {
if (batchSize) obj.batchSize = parseInt(batchSize, 10)
if (bestOf) obj.bestOf = parseInt(bestOf, 10)
- let parsedBaseOptions: any | undefined = undefined
+ if (cache) obj.cache = cache
+ let parsedBaseOptions: any | undefined = undefined
if (baseOptions) {
try {
parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
diff --git a/packages/components/nodes/llms/Replicate/Replicate.ts b/packages/components/nodes/llms/Replicate/Replicate.ts
index 22c6e93aa..fd5373a19 100644
--- a/packages/components/nodes/llms/Replicate/Replicate.ts
+++ b/packages/components/nodes/llms/Replicate/Replicate.ts
@@ -1,6 +1,8 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Replicate, ReplicateInput } from 'langchain/llms/replicate'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'
class Replicate_LLMs implements INode {
label: string
@@ -17,7 +19,7 @@ class Replicate_LLMs implements INode {
constructor() {
this.label = 'Replicate'
this.name = 'replicate'
- this.version = 1.0
+ this.version = 2.0
this.type = 'Replicate'
this.icon = 'replicate.svg'
this.category = 'LLMs'
@@ -30,6 +32,12 @@ class Replicate_LLMs implements INode {
credentialNames: ['replicateApi']
}
this.inputs = [
+ {
+ label: 'Cache',
+ name: 'cache',
+ type: 'BaseCache',
+ optional: true
+ },
{
label: 'Model',
name: 'model',
@@ -103,7 +111,9 @@ class Replicate_LLMs implements INode {
const name = modelName.split(':')[0].split('/').pop()
const org = modelName.split(':')[0].split('/')[0]
- const obj: ReplicateInput = {
+ const cache = nodeData.inputs?.cache as BaseCache
+
+ const obj: ReplicateInput & BaseLLMParams = {
model: `${org}/${name}:${version}`,
apiKey
}
@@ -120,6 +130,8 @@ class Replicate_LLMs implements INode {
}
if (Object.keys(inputs).length) obj.input = inputs
+ if (cache) obj.cache = cache
+
const model = new Replicate(obj)
return model
}
diff --git a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts
new file mode 100644
index 000000000..6b5fdf660
--- /dev/null
+++ b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts
@@ -0,0 +1,118 @@
+import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { ICommonObject } from '../../../src'
+import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
+import { UpstashRedisChatMessageHistory } from 'langchain/stores/message/upstash_redis'
+
+class UpstashRedisBackedChatMemory_Memory implements INode {
+ label: string
+ name: string
+ version: number
+ description: string
+ type: string
+ icon: string
+ category: string
+ baseClasses: string[]
+ credential: INodeParams
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'Upstash Redis-Backed Chat Memory'
+ this.name = 'upstashRedisBackedChatMemory'
+ this.version = 1.0
+ this.type = 'UpstashRedisBackedChatMemory'
+ this.icon = 'upstash.svg'
+ this.category = 'Memory'
+ this.description = 'Stores the conversation history in an Upstash Redis server'
+ this.baseClasses = [this.type, ...getBaseClasses(BufferMemory)]
+ this.credential = {
+ label: 'Connect Credential',
+ name: 'credential',
+ type: 'credential',
+ description: 'Configure password authentication on your Upstash Redis instance',
+ credentialNames: ['upstashRedisMemoryApi']
+ }
+ this.inputs = [
+ {
+ label: 'Upstash Redis REST URL',
+ name: 'baseURL',
+ type: 'string',
+ placeholder: 'https://<your-url>.upstash.io'
+ },
+ {
+ label: 'Session Id',
+ name: 'sessionId',
+ type: 'string',
+ description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
+ default: '',
+ additionalParams: true,
+ optional: true
+ },
+ {
+ label: 'Session Timeout',
+ name: 'sessionTTL',
+ type: 'number',
+ description: 'Omit this parameter to make sessions never expire',
+ additionalParams: true,
+ optional: true
+ }
+ ]
+ }
+
+ async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+ return initalizeUpstashRedis(nodeData, options)
+ }
+
+ async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
+ const redis = await initalizeUpstashRedis(nodeData, options)
+ const sessionId = nodeData.inputs?.sessionId as string
+ const chatId = options?.chatId as string
+ options.logger.info(`Clearing Upstash Redis memory session ${sessionId ? sessionId : chatId}`)
+ await redis.clear()
+ options.logger.info(`Successfully cleared Upstash Redis memory session ${sessionId ? sessionId : chatId}`)
+ }
+}
+
+const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
+ const baseURL = nodeData.inputs?.baseURL as string
+ const sessionId = nodeData.inputs?.sessionId as string
+ const sessionTTL = nodeData.inputs?.sessionTTL as string
+ const chatId = options?.chatId as string
+
+ let isSessionIdUsingChatMessageId = false
+ if (!sessionId && chatId) isSessionIdUsingChatMessageId = true
+
+ const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+ const upstashRestToken = getCredentialParam('upstashRestToken', credentialData, nodeData)
+
+ const redisChatMessageHistory = new UpstashRedisChatMessageHistory({
+ sessionId: sessionId ? sessionId : chatId,
+ sessionTTL: sessionTTL ? parseInt(sessionTTL, 10) : undefined,
+ config: {
+ url: baseURL,
+ token: upstashRestToken
+ }
+ })
+
+ const memory = new BufferMemoryExtended({
+ chatHistory: redisChatMessageHistory,
+ isSessionIdUsingChatMessageId
+ })
+
+ return memory
+}
+
+interface BufferMemoryExtendedInput {
+ isSessionIdUsingChatMessageId: boolean
+}
+
+class BufferMemoryExtended extends BufferMemory {
+ isSessionIdUsingChatMessageId? = false
+
+ constructor(fields: BufferMemoryInput & Partial<BufferMemoryExtendedInput>) {
+ super(fields)
+ this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
+ }
+}
+
+module.exports = { nodeClass: UpstashRedisBackedChatMemory_Memory }
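Outside of Flowise, the object graph that `initalizeUpstashRedis` assembles looks roughly like this; the URL and token are placeholders:

```typescript
import { BufferMemory } from 'langchain/memory'
import { UpstashRedisChatMessageHistory } from 'langchain/stores/message/upstash_redis'

const memory = new BufferMemory({
    chatHistory: new UpstashRedisChatMessageHistory({
        sessionId: 'user-123',
        sessionTTL: 3600, // seconds; omit so the session never expires
        config: {
            url: 'https://<your-url>.upstash.io',
            token: '<your-rest-token>'
        }
    })
})
```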
diff --git a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/upstash.svg b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/upstash.svg
new file mode 100644
index 000000000..a0fb96a79
--- /dev/null
+++ b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/upstash.svg
@@ -0,0 +1,12 @@
+
+
diff --git a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts
new file mode 100644
index 000000000..a9f4b3d87
--- /dev/null
+++ b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts
@@ -0,0 +1,107 @@
+import { VectorStore } from 'langchain/vectorstores/base'
+import { INode, INodeData, INodeParams, INodeOutputsValue } from '../../../src/Interface'
+import { handleEscapeCharacters } from '../../../src'
+import { ScoreThresholdRetriever } from 'langchain/retrievers/score_threshold'
+
+class SimilarityThresholdRetriever_Retrievers implements INode {
+ label: string
+ name: string
+ version: number
+ description: string
+ type: string
+ icon: string
+ category: string
+ baseClasses: string[]
+ inputs: INodeParams[]
+ outputs: INodeOutputsValue[]
+
+ constructor() {
+ this.label = 'Similarity Score Threshold Retriever'
+ this.name = 'similarityThresholdRetriever'
+ this.version = 1.0
+ this.type = 'SimilarityThresholdRetriever'
+ this.icon = 'similaritythreshold.svg'
+ this.category = 'Retrievers'
+ this.description = 'Return results based on the minimum similarity percentage'
+ this.baseClasses = [this.type, 'BaseRetriever']
+ this.inputs = [
+ {
+ label: 'Vector Store',
+ name: 'vectorStore',
+ type: 'VectorStore'
+ },
+ {
+ label: 'Minimum Similarity Score (%)',
+ name: 'minSimilarityScore',
+ description: 'Finds results with at least this similarity score',
+ type: 'number',
+ default: 80,
+ step: 1
+ },
+ {
+ label: 'Max K',
+ name: 'maxK',
+ description: `The maximum number of results to fetch`,
+ type: 'number',
+ default: 20,
+ step: 1
+ },
+ {
+ label: 'K Increment',
+ name: 'kIncrement',
+ description: `How much to increase K by each time. It'll fetch N results, then N + kIncrement, then N + kIncrement * 2, etc.`,
+ type: 'number',
+ default: 2,
+ step: 1
+ }
+ ]
+ this.outputs = [
+ {
+ label: 'Similarity Threshold Retriever',
+ name: 'retriever',
+ baseClasses: this.baseClasses
+ },
+ {
+ label: 'Document',
+ name: 'document',
+ baseClasses: ['Document']
+ },
+ {
+ label: 'Text',
+ name: 'text',
+ baseClasses: ['string', 'json']
+ }
+ ]
+ }
+
+ async init(nodeData: INodeData, input: string): Promise<any> {
+ const vectorStore = nodeData.inputs?.vectorStore as VectorStore
+ const minSimilarityScore = nodeData.inputs?.minSimilarityScore as number
+ const maxK = nodeData.inputs?.maxK as string
+ const kIncrement = nodeData.inputs?.kIncrement as string
+
+ const output = nodeData.outputs?.output as string
+
+ const retriever = ScoreThresholdRetriever.fromVectorStore(vectorStore, {
+ minSimilarityScore: minSimilarityScore ? minSimilarityScore / 100 : 0.9,
+ maxK: maxK ? parseInt(maxK, 10) : 100,
+ kIncrement: kIncrement ? parseInt(kIncrement, 10) : 2
+ })
+
+ if (output === 'retriever') return retriever
+ else if (output === 'document') return await retriever.getRelevantDocuments(input)
+ else if (output === 'text') {
+ let finaltext = ''
+
+ const docs = await retriever.getRelevantDocuments(input)
+
+ for (const doc of docs) finaltext += `${doc.pageContent}\n`
+
+ return handleEscapeCharacters(finaltext, false)
+ }
+
+ return retriever
+ }
+}
+
+module.exports = { nodeClass: SimilarityThresholdRetriever_Retrievers }
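Note that the node divides the UI percentage by 100 before handing it to ScoreThresholdRetriever. A standalone sketch of the equivalent call; the in-memory store, sample texts, and embeddings are stand-ins:

```typescript
import { MemoryVectorStore } from 'langchain/vectorstores/memory'
import { OpenAIEmbeddings } from 'langchain/embeddings/openai'
import { ScoreThresholdRetriever } from 'langchain/retrievers/score_threshold'

const store = await MemoryVectorStore.fromTexts(
    ['Flowise is a low-code LLM app builder', 'Bananas are yellow'],
    [{ id: 1 }, { id: 2 }],
    new OpenAIEmbeddings()
)

const retriever = ScoreThresholdRetriever.fromVectorStore(store, {
    minSimilarityScore: 0.8, // 80% in the node UI
    maxK: 20,                // hard ceiling on fetched results
    kIncrement: 2            // grow K by this much per round until enough results pass the threshold
})

const docs = await retriever.getRelevantDocuments('What is Flowise?')
```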
diff --git a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/similaritythreshold.svg b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/similaritythreshold.svg
new file mode 100644
index 000000000..6b918fd8b
--- /dev/null
+++ b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/similaritythreshold.svg
@@ -0,0 +1,5 @@
+
\ No newline at end of file
diff --git a/packages/components/nodes/vectorstores/Chroma/core.ts b/packages/components/nodes/vectorstores/Chroma/core.ts
index 8277c58ef..b1bf9cc79 100644
--- a/packages/components/nodes/vectorstores/Chroma/core.ts
+++ b/packages/components/nodes/vectorstores/Chroma/core.ts
@@ -30,7 +30,7 @@ export class ChromaExtended extends Chroma {
if (this.chromaApiKey) {
obj.fetchOptions = {
headers: {
- 'X-Api-Key': this.chromaApiKey
+ Authorization: `Bearer ${this.chromaApiKey}`
}
}
}
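This one-line change switches Chroma authentication from the custom X-Api-Key header to a standard bearer scheme, which is presumably what Chroma's server-side token auth expects. The resulting fetch options, sketched outside the class with a placeholder token:

```typescript
const chromaApiKey = '<your-chroma-token>'

const fetchOptions = {
    headers: {
        Authorization: `Bearer ${chromaApiKey}`
    }
}
```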
diff --git a/packages/components/package.json b/packages/components/package.json
index 3fdd8923a..bd7251f6c 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -1,6 +1,6 @@
{
"name": "flowise-components",
- "version": "1.3.7",
+ "version": "1.3.8",
"description": "Flowiseai Components",
"main": "dist/src/index",
"types": "dist/src/index.d.ts",
@@ -18,8 +18,9 @@
"dependencies": {
"@aws-sdk/client-dynamodb": "^3.360.0",
"@dqbd/tiktoken": "^1.0.7",
- "@getzep/zep-js": "^0.6.3",
"@elastic/elasticsearch": "^8.9.0",
+ "@getzep/zep-js": "^0.6.3",
+ "@gomomento/sdk": "^1.40.2",
"@google-ai/generativelanguage": "^0.2.1",
"@huggingface/inference": "^2.6.1",
"@notionhq/client": "^2.2.8",
@@ -29,6 +30,7 @@
"@supabase/supabase-js": "^2.29.0",
"@types/js-yaml": "^4.0.5",
"@types/jsdom": "^21.1.1",
+ "@upstash/redis": "^1.22.1",
"@zilliz/milvus2-sdk-node": "^2.2.24",
"apify-client": "^2.7.1",
"axios": "^0.27.2",
@@ -43,7 +45,8 @@
"google-auth-library": "^9.0.0",
"graphql": "^16.6.0",
"html-to-text": "^9.0.5",
- "langchain": "^0.0.152",
+ "ioredis": "^5.3.2",
+ "langchain": "^0.0.157",
"langfuse-langchain": "^1.0.14-alpha.0",
"langsmith": "^0.0.32",
"linkifyjs": "^4.1.1",
diff --git a/packages/components/src/handler.ts b/packages/components/src/handler.ts
index 10f9a2141..3b1952d6d 100644
--- a/packages/components/src/handler.ts
+++ b/packages/components/src/handler.ts
@@ -151,6 +151,7 @@ export class CustomChainHandler extends BaseCallbackHandler {
socketIOClientId = ''
skipK = 0 // Skip streaming for first K numbers of handleLLMStart
returnSourceDocuments = false
+ cachedResponse = true
constructor(socketIO: Server, socketIOClientId: string, skipK?: number, returnSourceDocuments?: boolean) {
super()
@@ -161,6 +162,7 @@ export class CustomChainHandler extends BaseCallbackHandler {
}
handleLLMStart() {
+ this.cachedResponse = false
if (this.skipK > 0) this.skipK -= 1
}
@@ -178,9 +180,30 @@ export class CustomChainHandler extends BaseCallbackHandler {
this.socketIO.to(this.socketIOClientId).emit('end')
}
- handleChainEnd(outputs: ChainValues): void | Promise {
- if (this.returnSourceDocuments) {
- this.socketIO.to(this.socketIOClientId).emit('sourceDocuments', outputs?.sourceDocuments)
+ handleChainEnd(outputs: ChainValues, _: string, parentRunId?: string): void | Promise {
+ /*
+ LangChain does not call handleLLMStart, handleLLMEnd, or handleLLMNewToken when the chain response is served from the cache.
+ Callback order is "Chain Start -> LLM Start -> LLM Token -> LLM End -> Chain End" for normal responses.
+ Callback order is "Chain Start -> Chain End" for cached responses.
+ */
+ if (this.cachedResponse && parentRunId === undefined) {
+ const cachedValue = outputs.text ?? outputs.response ?? outputs.output ?? outputs.output_text
+ // Split on whitespace while keeping the separators, to preserve the original formatting.
+ const result = cachedValue.split(/(\s+)/)
+ result.forEach((token: string, index: number) => {
+ if (index === 0) {
+ this.socketIO.to(this.socketIOClientId).emit('start', token)
+ }
+ this.socketIO.to(this.socketIOClientId).emit('token', token)
+ })
+ if (this.returnSourceDocuments) {
+ this.socketIO.to(this.socketIOClientId).emit('sourceDocuments', outputs?.sourceDocuments)
+ }
+ this.socketIO.to(this.socketIOClientId).emit('end')
+ } else {
+ if (this.returnSourceDocuments) {
+ this.socketIO.to(this.socketIOClientId).emit('sourceDocuments', outputs?.sourceDocuments)
+ }
}
}
}
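The capturing group in `split(/(\s+)/)` is what makes the simulated streaming lossless: the separators are kept in the result array, so re-joining the emitted tokens reproduces the cached text exactly. A quick check:

```typescript
const cachedValue = 'Hello  world\nhow are you?'
const result = cachedValue.split(/(\s+)/)
// ['Hello', '  ', 'world', '\n', 'how', ' ', 'are', ' ', 'you?']

console.log(result.join('') === cachedValue) // true
```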
diff --git a/packages/server/marketplaces/chatflows/API Agent OpenAI.json b/packages/server/marketplaces/chatflows/API Agent OpenAI.json
index 01e3d8f99..b2ff977be 100644
--- a/packages/server/marketplaces/chatflows/API Agent OpenAI.json
+++ b/packages/server/marketplaces/chatflows/API Agent OpenAI.json
@@ -89,7 +89,7 @@
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -201,7 +201,15 @@
"id": "chatOpenAI_1-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_1-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
@@ -392,7 +400,7 @@
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -504,7 +512,15 @@
"id": "chatOpenAI_2-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_2-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/API Agent.json b/packages/server/marketplaces/chatflows/API Agent.json
index b9862add3..c37aaff22 100644
--- a/packages/server/marketplaces/chatflows/API Agent.json
+++ b/packages/server/marketplaces/chatflows/API Agent.json
@@ -397,7 +397,7 @@
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -509,7 +509,15 @@
"id": "chatOpenAI_2-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_2-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
@@ -551,7 +559,7 @@
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -663,7 +671,15 @@
"id": "chatOpenAI_1-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_1-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
@@ -705,7 +721,7 @@
"id": "chatOpenAI_3",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -817,7 +833,15 @@
"id": "chatOpenAI_3-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_3-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/Antonym.json b/packages/server/marketplaces/chatflows/Antonym.json
index 95d3c1517..5f8ff7a8e 100644
--- a/packages/server/marketplaces/chatflows/Antonym.json
+++ b/packages/server/marketplaces/chatflows/Antonym.json
@@ -169,14 +169,14 @@
"id": "chatOpenAI_0",
"position": {
"x": 1226.7977900193628,
- "y": 48.01100655894436
+ "y": -22.01100655894436
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -288,7 +288,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
@@ -313,7 +321,7 @@
"selected": false,
"positionAbsolute": {
"x": 1226.7977900193628,
- "y": 48.01100655894436
+ "y": -22.01100655894436
},
"dragging": false
},
diff --git a/packages/server/marketplaces/chatflows/AutoGPT.json b/packages/server/marketplaces/chatflows/AutoGPT.json
index 538371516..4fb706ab1 100644
--- a/packages/server/marketplaces/chatflows/AutoGPT.json
+++ b/packages/server/marketplaces/chatflows/AutoGPT.json
@@ -252,7 +252,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -364,7 +364,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/BabyAGI.json b/packages/server/marketplaces/chatflows/BabyAGI.json
index c28975314..04410b82f 100644
--- a/packages/server/marketplaces/chatflows/BabyAGI.json
+++ b/packages/server/marketplaces/chatflows/BabyAGI.json
@@ -78,7 +78,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -190,7 +190,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/CSV Agent.json b/packages/server/marketplaces/chatflows/CSV Agent.json
index 1515fcad3..37764a53f 100644
--- a/packages/server/marketplaces/chatflows/CSV Agent.json
+++ b/packages/server/marketplaces/chatflows/CSV Agent.json
@@ -70,7 +70,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -182,7 +182,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json
index 471853baa..1f00ff5f8 100644
--- a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json
+++ b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json
@@ -215,7 +215,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -327,7 +327,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/Claude LLM.json b/packages/server/marketplaces/chatflows/Claude LLM.json
index 243d26001..b79898154 100644
--- a/packages/server/marketplaces/chatflows/Claude LLM.json
+++ b/packages/server/marketplaces/chatflows/Claude LLM.json
@@ -141,14 +141,14 @@
"id": "chatAnthropic_0",
"position": {
"x": 800.5525382783799,
- "y": -76.7988221837009
+ "y": -130.7988221837009
},
"type": "customNode",
"data": {
"id": "chatAnthropic_0",
"label": "ChatAnthropic",
"name": "chatAnthropic",
- "version": 1,
+ "version": 2,
"type": "ChatAnthropic",
"baseClasses": ["ChatAnthropic", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -258,7 +258,15 @@
"id": "chatAnthropic_0-input-topK-number"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatAnthropic_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "claude-2",
"temperature": 0.9,
@@ -280,7 +288,7 @@
"selected": false,
"positionAbsolute": {
"x": 800.5525382783799,
- "y": -76.7988221837009
+ "y": -130.7988221837009
},
"dragging": false
},
diff --git a/packages/server/marketplaces/chatflows/Conversational Agent.json b/packages/server/marketplaces/chatflows/Conversational Agent.json
index 55475b3e9..d18f2ac06 100644
--- a/packages/server/marketplaces/chatflows/Conversational Agent.json
+++ b/packages/server/marketplaces/chatflows/Conversational Agent.json
@@ -157,7 +157,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -269,7 +269,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json
index dcf344d12..7c5c38e2d 100644
--- a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json
+++ b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json
@@ -13,7 +13,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
- "version": 1,
+ "version": 2,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@@ -126,7 +126,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo-16k",
"temperature": "0",
diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json
index bf27e443e..725ca7c90 100644
--- a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json
+++ b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json
@@ -7,14 +7,14 @@
"id": "chatOpenAI_0",
"position": {
"x": 1184.1176114500388,
- "y": -44.15535835370571
+ "y": -74.15535835370571
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -126,7 +126,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": "0",
@@ -150,7 +158,7 @@
},
"positionAbsolute": {
"x": 1184.1176114500388,
- "y": -44.15535835370571
+ "y": -74.15535835370571
},
"selected": false,
"dragging": false
diff --git a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json
index f8b274a18..2928d29dd 100644
--- a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json
+++ b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json
@@ -386,7 +386,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -498,7 +498,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json b/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json
index 6e159a286..e51e1ee03 100644
--- a/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json
+++ b/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json
@@ -148,14 +148,14 @@
"id": "huggingFaceInference_LLMs_0",
"position": {
"x": 498.8594464193537,
- "y": -44.91050256311678
+ "y": -94.91050256311678
},
"type": "customNode",
"data": {
"id": "huggingFaceInference_LLMs_0",
"label": "HuggingFace Inference",
"name": "huggingFaceInference_LLMs",
- "version": 1,
+ "version": 2,
"type": "HuggingFaceInference",
"baseClasses": ["HuggingFaceInference", "LLM", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",
@@ -232,7 +232,15 @@
"id": "huggingFaceInference_LLMs_0-input-frequencyPenalty-number"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "huggingFaceInference_LLMs_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"model": "tiiuae/falcon-7b-instruct",
"endpoint": "",
@@ -256,7 +264,7 @@
"selected": false,
"positionAbsolute": {
"x": 498.8594464193537,
- "y": -44.91050256311678
+ "y": -94.91050256311678
},
"dragging": false
}
diff --git a/packages/server/marketplaces/chatflows/Local QnA.json b/packages/server/marketplaces/chatflows/Local QnA.json
index 9d9f5ec80..21bdf744c 100644
--- a/packages/server/marketplaces/chatflows/Local QnA.json
+++ b/packages/server/marketplaces/chatflows/Local QnA.json
@@ -265,14 +265,14 @@
"id": "chatLocalAI_0",
"position": {
"x": 1191.9512064167336,
- "y": -44.05401001663306
+ "y": -94.05401001663306
},
"type": "customNode",
"data": {
"id": "chatLocalAI_0",
"label": "ChatLocalAI",
"name": "chatLocalAI",
- "version": 1,
+ "version": 2,
"type": "ChatLocalAI",
"baseClasses": ["ChatLocalAI", "BaseChatModel", "LLM", "BaseLLM", "BaseLanguageModel", "BaseLangChain"],
"category": "Chat Models",
@@ -325,7 +325,15 @@
"id": "chatLocalAI_0-input-timeout-number"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatLocalAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"basePath": "http://localhost:8080/v1",
"modelName": "ggml-gpt4all-j.bin",
@@ -348,7 +356,7 @@
"selected": false,
"positionAbsolute": {
"x": 1191.9512064167336,
- "y": -44.05401001663306
+ "y": -94.05401001663306
},
"dragging": false
},
diff --git a/packages/server/marketplaces/chatflows/Long Term Memory.json b/packages/server/marketplaces/chatflows/Long Term Memory.json
index 07669f82b..6f22c00ae 100644
--- a/packages/server/marketplaces/chatflows/Long Term Memory.json
+++ b/packages/server/marketplaces/chatflows/Long Term Memory.json
@@ -115,14 +115,14 @@
"id": "chatOpenAI_0",
"position": {
"x": 1554.3875781165111,
- "y": -14.792508259787212
+ "y": -74.792508259787212
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -234,7 +234,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": "0",
@@ -259,7 +267,7 @@
"selected": false,
"positionAbsolute": {
"x": 1554.3875781165111,
- "y": -14.792508259787212
+ "y": -74.792508259787212
},
"dragging": false
},
diff --git a/packages/server/marketplaces/chatflows/MRKLAgent.json b/packages/server/marketplaces/chatflows/MRKLAgent.json
index f851b0ede..697e49195 100644
--- a/packages/server/marketplaces/chatflows/MRKLAgent.json
+++ b/packages/server/marketplaces/chatflows/MRKLAgent.json
@@ -156,7 +156,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -268,7 +268,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/Metadata Filter Load.json b/packages/server/marketplaces/chatflows/Metadata Filter Load.json
index b6ca91e30..43438d6b4 100644
--- a/packages/server/marketplaces/chatflows/Metadata Filter Load.json
+++ b/packages/server/marketplaces/chatflows/Metadata Filter Load.json
@@ -113,14 +113,14 @@
"id": "chatOpenAI_0",
"position": {
"x": 1197.7264239788542,
- "y": -16.177600120515933
+ "y": -76.177600120515933
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -232,7 +232,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": "0",
@@ -257,7 +265,7 @@
"selected": false,
"positionAbsolute": {
"x": 1197.7264239788542,
- "y": -16.177600120515933
+ "y": -76.177600120515933
},
"dragging": false
},
diff --git a/packages/server/marketplaces/chatflows/Metadata Filter Upsert.json b/packages/server/marketplaces/chatflows/Metadata Filter Upsert.json
index e70b11f74..525fa1b9f 100644
--- a/packages/server/marketplaces/chatflows/Metadata Filter Upsert.json
+++ b/packages/server/marketplaces/chatflows/Metadata Filter Upsert.json
@@ -436,7 +436,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -548,7 +548,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json
index cf86df5be..e1063dcfe 100644
--- a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json
+++ b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json
@@ -278,7 +278,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -390,7 +390,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json
index f5604bf60..36240e39d 100644
--- a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json
+++ b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json
@@ -679,7 +679,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -791,7 +791,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/Multiple VectorDB.json b/packages/server/marketplaces/chatflows/Multiple VectorDB.json
index ac4643aab..b4dedfddf 100644
--- a/packages/server/marketplaces/chatflows/Multiple VectorDB.json
+++ b/packages/server/marketplaces/chatflows/Multiple VectorDB.json
@@ -321,14 +321,14 @@
"id": "openAI_2",
"position": {
"x": 520.8471510168988,
- "y": -1282.1183473852964
+ "y": -1362.1183473852964
},
"type": "customNode",
"data": {
"id": "openAI_2",
"label": "OpenAI",
"name": "openAI",
- "version": 2,
+ "version": 3,
"type": "OpenAI",
"baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",
@@ -436,7 +436,15 @@
"id": "openAI_2-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "openAI_2-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo-instruct",
"temperature": 0.7,
@@ -463,7 +471,7 @@
"selected": false,
"positionAbsolute": {
"x": 520.8471510168988,
- "y": -1282.1183473852964
+ "y": -1362.1183473852964
},
"dragging": false
},
@@ -557,7 +565,7 @@
"id": "chromaExistingIndex_0",
"position": {
"x": 509.55198017578016,
- "y": -732.42003311752
+ "y": -782.42003311752
},
"type": "customNode",
"data": {
@@ -638,7 +646,7 @@
"selected": false,
"positionAbsolute": {
"x": 509.55198017578016,
- "y": -732.42003311752
+ "y": -782.42003311752
},
"dragging": false
},
@@ -732,14 +740,14 @@
"id": "openAI_3",
"position": {
"x": 504.808358369027,
- "y": -197.78194663790197
+ "y": -257.78194663790197
},
"type": "customNode",
"data": {
"id": "openAI_3",
"label": "OpenAI",
"name": "openAI",
- "version": 2,
+ "version": 3,
"type": "OpenAI",
"baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",
@@ -847,7 +855,15 @@
"id": "openAI_3-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "openAI_3-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo-instruct",
"temperature": 0.7,
@@ -874,7 +890,7 @@
"selected": false,
"positionAbsolute": {
"x": 504.808358369027,
- "y": -197.78194663790197
+ "y": -257.78194663790197
},
"dragging": false
},
@@ -993,14 +1009,14 @@
"id": "openAI_4",
"position": {
"x": 1619.5346765785587,
- "y": 292.29615581180684
+ "y": 352.29615581180684
},
"type": "customNode",
"data": {
"id": "openAI_4",
"label": "OpenAI",
"name": "openAI",
- "version": 2,
+ "version": 3,
"type": "OpenAI",
"baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",
@@ -1108,7 +1124,15 @@
"id": "openAI_4-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "openAI_4-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo-instruct",
"temperature": 0.7,
@@ -1135,7 +1159,7 @@
"selected": false,
"positionAbsolute": {
"x": 1619.5346765785587,
- "y": 292.29615581180684
+ "y": 352.29615581180684
},
"dragging": false
}
diff --git a/packages/server/marketplaces/chatflows/OpenAI Agent.json b/packages/server/marketplaces/chatflows/OpenAI Agent.json
index 91d5d38ce..9a98d29dc 100644
--- a/packages/server/marketplaces/chatflows/OpenAI Agent.json
+++ b/packages/server/marketplaces/chatflows/OpenAI Agent.json
@@ -281,7 +281,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -393,7 +393,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
diff --git a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json
index 9d6838eb3..2af611900 100644
--- a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json
+++ b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json
@@ -260,13 +260,13 @@
"id": "chatOpenAI_0",
"position": {
"x": 335.7621848973805,
- "y": -651.7411273245009
+ "y": -721.7411273245009
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
- "version": 1,
+ "version": 2,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@@ -385,7 +385,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo-16k",
"temperature": 0.9,
@@ -411,7 +419,7 @@
"dragging": false,
"positionAbsolute": {
"x": 335.7621848973805,
- "y": -651.7411273245009
+ "y": -721.7411273245009
}
},
{
@@ -420,13 +428,13 @@
"id": "chatOpenAI_1",
"position": {
"x": 1765.2801848172305,
- "y": -667.9261054149061
+ "y": -737.9261054149061
},
"type": "customNode",
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
- "version": 1,
+ "version": 2,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@@ -545,7 +553,15 @@
"id": "chatOpenAI_1-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_1-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo-16k",
"temperature": 0.9,
@@ -571,7 +587,7 @@
"dragging": false,
"positionAbsolute": {
"x": 1765.2801848172305,
- "y": -667.9261054149061
+ "y": -737.9261054149061
}
},
{
diff --git a/packages/server/marketplaces/chatflows/Prompt Chaining.json b/packages/server/marketplaces/chatflows/Prompt Chaining.json
index 5bce99057..77c238ad7 100644
--- a/packages/server/marketplaces/chatflows/Prompt Chaining.json
+++ b/packages/server/marketplaces/chatflows/Prompt Chaining.json
@@ -289,14 +289,14 @@
"id": "openAI_1",
"position": {
"x": 791.6102007244282,
- "y": -13.71386876566092
+ "y": -83.71386876566092
},
"type": "customNode",
"data": {
"id": "openAI_1",
"label": "OpenAI",
"name": "openAI",
- "version": 2,
+ "version": 3,
"type": "OpenAI",
"baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",
@@ -404,7 +404,15 @@
"id": "openAI_1-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "openAI_1-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo-instruct",
"temperature": 0.7,
@@ -431,7 +439,7 @@
"selected": false,
"positionAbsolute": {
"x": 791.6102007244282,
- "y": -13.71386876566092
+ "y": -83.71386876566092
},
"dragging": false
},
@@ -441,14 +449,14 @@
"id": "openAI_2",
"position": {
"x": 1571.148617508543,
- "y": -20.372437481171687
+ "y": -90.372437481171687
},
"type": "customNode",
"data": {
"id": "openAI_2",
"label": "OpenAI",
"name": "openAI",
- "version": 2,
+ "version": 3,
"type": "OpenAI",
"baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",
@@ -556,7 +564,15 @@
"id": "openAI_2-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "openAI_2-input-cache-BaseCache"
+ }
+ ],
"default": "gpt-3.5-turbo-instruct",
"inputs": {
"modelName": "gpt-3.5-turbo-instruct",
@@ -584,7 +600,7 @@
"selected": false,
"positionAbsolute": {
"x": 1571.148617508543,
- "y": -20.372437481171687
+ "y": -90.372437481171687
},
"dragging": false
}
diff --git a/packages/server/marketplaces/chatflows/Replicate LLM.json b/packages/server/marketplaces/chatflows/Replicate LLM.json
index c5a0ac8ff..0049214c7 100644
--- a/packages/server/marketplaces/chatflows/Replicate LLM.json
+++ b/packages/server/marketplaces/chatflows/Replicate LLM.json
@@ -148,13 +148,13 @@
"id": "replicate_0",
"position": {
"x": 623.313978186024,
- "y": -72.92788335022428
+ "y": -142.92788335022428
},
"type": "customNode",
"data": {
"id": "replicate_0",
"label": "Replicate",
- "version": 1,
+ "version": 2,
"name": "replicate",
"type": "Replicate",
"baseClasses": ["Replicate", "BaseChatModel", "LLM", "BaseLLM", "BaseLanguageModel", "Runnable"],
@@ -226,7 +226,15 @@
"id": "replicate_0-input-additionalInputs-json"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "replicate_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"model": "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
"temperature": 0.7,
@@ -249,7 +257,7 @@
"selected": false,
"positionAbsolute": {
"x": 623.313978186024,
- "y": -72.92788335022428
+ "y": -142.92788335022428
},
"dragging": false
}
diff --git a/packages/server/marketplaces/chatflows/SQL DB Chain.json b/packages/server/marketplaces/chatflows/SQL DB Chain.json
index acec5432e..3b32efe09 100644
--- a/packages/server/marketplaces/chatflows/SQL DB Chain.json
+++ b/packages/server/marketplaces/chatflows/SQL DB Chain.json
@@ -13,7 +13,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
- "version": 1,
+ "version": 2,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@@ -126,7 +126,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": "0",
@@ -167,7 +175,7 @@
"data": {
"id": "sqlDatabaseChain_0",
"label": "Sql Database Chain",
- "version": 2,
+ "version": 4,
"name": "sqlDatabaseChain",
"type": "SqlDatabaseChain",
"baseClasses": ["SqlDatabaseChain", "BaseChain", "Runnable"],
@@ -206,6 +214,46 @@
"placeholder": "1270.0.0.1:5432/chinook",
"id": "sqlDatabaseChain_0-input-url-string"
},
+ {
+ "label": "Include Tables",
+ "name": "includesTables",
+ "type": "string",
+ "description": "Tables to include for queries, seperated by comma. Can only use Include Tables or Ignore Tables",
+ "placeholder": "table1, table2",
+ "additionalParams": true,
+ "optional": true,
+ "id": "sqlDatabaseChain_0-input-includesTables-string"
+ },
+ {
+ "label": "Ignore Tables",
+ "name": "ignoreTables",
+ "type": "string",
+ "description": "Tables to ignore for queries, seperated by comma. Can only use Ignore Tables or Include Tables",
+ "placeholder": "table1, table2",
+ "additionalParams": true,
+ "optional": true,
+ "id": "sqlDatabaseChain_0-input-ignoreTables-string"
+ },
+ {
+ "label": "Sample table's rows info",
+ "name": "sampleRowsInTableInfo",
+ "type": "number",
+ "description": "Number of sample row for tables to load for info.",
+ "placeholder": "3",
+ "additionalParams": true,
+ "optional": true,
+ "id": "sqlDatabaseChain_0-input-sampleRowsInTableInfo-number"
+ },
+ {
+ "label": "Top Keys",
+ "name": "topK",
+ "type": "number",
+ "description": "If you are querying for several rows of a table you can select the maximum number of results you want to get by using the top_k parameter (default is 10). This is useful for avoiding query results that exceed the prompt max length or consume tokens unnecessarily.",
+ "placeholder": "10",
+ "additionalParams": true,
+ "optional": true,
+ "id": "sqlDatabaseChain_0-input-topK-number"
+ },
{
"label": "Custom Prompt",
"name": "customPrompt",
diff --git a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json
index 2c41a54f6..57ff348ab 100644
--- a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json
+++ b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json
@@ -64,14 +64,14 @@
"id": "chatOpenAI_0",
"position": {
"x": 754.8942497823595,
- "y": -70.76607584232393
+ "y": -140
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -183,7 +183,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
@@ -208,7 +216,7 @@
"selected": false,
"positionAbsolute": {
"x": 754.8942497823595,
- "y": -70.76607584232393
+ "y": -140
},
"dragging": false
},
diff --git a/packages/server/marketplaces/chatflows/Simple LLM Chain.json b/packages/server/marketplaces/chatflows/Simple LLM Chain.json
index 21d5ab689..f3db04ef2 100644
--- a/packages/server/marketplaces/chatflows/Simple LLM Chain.json
+++ b/packages/server/marketplaces/chatflows/Simple LLM Chain.json
@@ -148,14 +148,14 @@
"id": "openAI_0",
"position": {
"x": 513.3297923232442,
- "y": -42.67554802812833
+ "y": -112.67554802812833
},
"type": "customNode",
"data": {
"id": "openAI_0",
"label": "OpenAI",
"name": "openAI",
- "version": 2,
+ "version": 3,
"type": "OpenAI",
"baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",
@@ -263,7 +263,15 @@
"id": "openAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "openAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo-instruct",
"temperature": 0.7,
@@ -290,7 +298,7 @@
"selected": false,
"positionAbsolute": {
"x": 513.3297923232442,
- "y": -42.67554802812833
+ "y": -112.67554802812833
},
"dragging": false
}
diff --git a/packages/server/marketplaces/chatflows/Translator.json b/packages/server/marketplaces/chatflows/Translator.json
index dc2ee6ba0..b552aceb8 100644
--- a/packages/server/marketplaces/chatflows/Translator.json
+++ b/packages/server/marketplaces/chatflows/Translator.json
@@ -157,14 +157,14 @@
"id": "chatOpenAI_0",
"position": {
"x": 436.97058562345904,
- "y": 99.96180150605153
+ "y": 29.96180150605153
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -276,7 +276,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": "0",
@@ -301,7 +309,7 @@
"selected": false,
"positionAbsolute": {
"x": 436.97058562345904,
- "y": 99.96180150605153
+ "y": 29.96180150605153
},
"dragging": false
}
diff --git a/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json b/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json
index 47cfef872..dc1c1e17d 100644
--- a/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json
+++ b/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json
@@ -124,7 +124,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
- "version": 1,
+ "version": 2,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@@ -230,7 +230,15 @@
"id": "chatOpenAI_0-input-baseOptions-json"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": "0.5",
diff --git a/packages/server/marketplaces/chatflows/WebBrowser.json b/packages/server/marketplaces/chatflows/WebBrowser.json
index 95743f9f4..b784f9ab7 100644
--- a/packages/server/marketplaces/chatflows/WebBrowser.json
+++ b/packages/server/marketplaces/chatflows/WebBrowser.json
@@ -194,14 +194,14 @@
"id": "chatOpenAI_0",
"position": {
"x": 734.7477982032904,
- "y": -400.9979556765114
+ "y": -470.9979556765114
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -313,7 +313,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
@@ -338,7 +346,7 @@
"selected": false,
"positionAbsolute": {
"x": 734.7477982032904,
- "y": -400.9979556765114
+ "y": -470.9979556765114
},
"dragging": false
},
@@ -432,14 +440,14 @@
"id": "chatOpenAI_1",
"position": {
"x": 68.312124033115,
- "y": -169.65476709991256
+ "y": -239.65476709991256
},
"type": "customNode",
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"name": "chatOpenAI",
- "version": 1,
+ "version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@@ -551,7 +559,15 @@
"id": "chatOpenAI_1-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_1-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
@@ -576,7 +592,7 @@
"selected": false,
"positionAbsolute": {
"x": 68.312124033115,
- "y": -169.65476709991256
+ "y": -239.65476709991256
},
"dragging": false
}
diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json
index 812f0bd50..66bac37f6 100644
--- a/packages/server/marketplaces/chatflows/WebPage QnA.json
+++ b/packages/server/marketplaces/chatflows/WebPage QnA.json
@@ -13,7 +13,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
- "version": 1,
+ "version": 2,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@@ -126,7 +126,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "chatOpenAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo-16k",
"temperature": "0.9",
diff --git a/packages/server/marketplaces/chatflows/Zapier NLA.json b/packages/server/marketplaces/chatflows/Zapier NLA.json
index 182b24aee..49527da24 100644
--- a/packages/server/marketplaces/chatflows/Zapier NLA.json
+++ b/packages/server/marketplaces/chatflows/Zapier NLA.json
@@ -115,7 +115,7 @@
"id": "openAI_0",
"label": "OpenAI",
"name": "openAI",
- "version": 2,
+ "version": 3,
"type": "OpenAI",
"baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",
@@ -223,7 +223,15 @@
"id": "openAI_0-input-basepath-string"
}
],
- "inputAnchors": [],
+ "inputAnchors": [
+ {
+ "label": "Cache",
+ "name": "cache",
+ "type": "BaseCache",
+ "optional": true,
+ "id": "openAI_0-input-cache-BaseCache"
+ }
+ ],
"inputs": {
"modelName": "gpt-3.5-turbo-instruct",
"temperature": 0.7,
diff --git a/packages/server/package.json b/packages/server/package.json
index 6f17ad611..a5fb994c7 100644
--- a/packages/server/package.json
+++ b/packages/server/package.json
@@ -1,6 +1,6 @@
{
"name": "flowise",
- "version": "1.3.6",
+ "version": "1.3.7",
"description": "Flowiseai Server",
"main": "dist/index",
"types": "dist/index.d.ts",
diff --git a/packages/server/src/commands/start.ts b/packages/server/src/commands/start.ts
index b9ea970d5..6bf7d6993 100644
--- a/packages/server/src/commands/start.ts
+++ b/packages/server/src/commands/start.ts
@@ -27,6 +27,7 @@ export default class Start extends Command {
LOG_LEVEL: Flags.string(),
TOOL_FUNCTION_BUILTIN_DEP: Flags.string(),
TOOL_FUNCTION_EXTERNAL_DEP: Flags.string(),
+ NUMBER_OF_PROXIES: Flags.string(),
DATABASE_TYPE: Flags.string(),
DATABASE_PATH: Flags.string(),
DATABASE_PORT: Flags.string(),
@@ -72,6 +73,7 @@ export default class Start extends Command {
if (flags.PORT) process.env.PORT = flags.PORT
if (flags.DEBUG) process.env.DEBUG = flags.DEBUG
+ if (flags.NUMBER_OF_PROXIES) process.env.NUMBER_OF_PROXIES = flags.NUMBER_OF_PROXIES
// Authorization
if (flags.FLOWISE_USERNAME) process.env.FLOWISE_USERNAME = flags.FLOWISE_USERNAME
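The new NUMBER_OF_PROXIES flag is only forwarded into the environment here; the consuming code is not part of this diff. Presumably it configures Express's trust-proxy setting, which rate limiting needs behind reverse proxies so that `req.ip` reflects the real client rather than the proxy. A minimal sketch under that assumption:

```typescript
// Assumed consumer of NUMBER_OF_PROXIES: tell Express how many reverse-proxy
// hops to trust, so per-client rate limits key off the real client address.
import express from 'express'

const app = express()
if (process.env.NUMBER_OF_PROXIES) {
    app.set('trust proxy', parseInt(process.env.NUMBER_OF_PROXIES, 10))
}
```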
diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts
index bb048490d..664f110bc 100644
--- a/packages/server/src/utils/index.ts
+++ b/packages/server/src/utils/index.ts
@@ -477,6 +477,7 @@ export const replaceInputsWithConfig = (flowNodeData: INodeData, overrideConfig:
*/
export const isStartNodeDependOnInput = (startingNodes: IReactFlowNode[], nodes: IReactFlowNode[]): boolean => {
for (const node of startingNodes) {
+ if (node.data.category === 'Cache') return true
for (const inputName in node.data.inputs) {
const inputVariables = getInputVariables(node.data.inputs[inputName])
if (inputVariables.length > 0) return true
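The new guard treats any flow whose starting node sits in the Cache category as input-dependent, so it is rebuilt per request instead of being served from the pooled, pre-built flow. That matches how an LLM cache behaves: hits are keyed off the incoming prompt, as the illustrative, not Flowise-specific, sketch below shows:

```typescript
// Toy prompt-keyed cache. Because the key depends on the incoming question,
// a flow starting from a cache node cannot be resolved ahead of time, which
// is why isStartNodeDependOnInput returns true for Cache-category nodes.
import { createHash } from 'crypto'

const store = new Map<string, string>()

const cacheKey = (prompt: string, modelKey: string): string =>
    createHash('sha256').update(`${modelKey}:${prompt}`).digest('hex')

export function lookup(prompt: string, modelKey: string): string | undefined {
    return store.get(cacheKey(prompt, modelKey)) // hit only when this exact prompt repeats
}

export function update(prompt: string, modelKey: string, answer: string): void {
    store.set(cacheKey(prompt, modelKey), answer)
}
```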
diff --git a/packages/server/src/utils/rateLimit.ts b/packages/server/src/utils/rateLimit.ts
index b1cd1819d..68b5b693b 100644
--- a/packages/server/src/utils/rateLimit.ts
+++ b/packages/server/src/utils/rateLimit.ts
@@ -12,7 +12,7 @@ async function addRateLimiter(id: string, duration: number, limit: number, messa
rateLimiters[id] = rateLimit({
windowMs: duration * 1000,
max: limit,
- handler: (req, res) => {
+ handler: (_, res) => {
res.status(429).send(message)
}
})
@@ -33,15 +33,19 @@ export function getRateLimiter(req: Request, res: Response, next: NextFunction)
export async function createRateLimiter(chatFlow: IChatFlow) {
if (!chatFlow.apiConfig) return
- const apiConfig: any = JSON.parse(chatFlow.apiConfig)
+ const apiConfig = JSON.parse(chatFlow.apiConfig)
+
const rateLimit: { limitDuration: number; limitMax: number; limitMsg: string } = apiConfig.rateLimit
if (!rateLimit) return
+
const { limitDuration, limitMax, limitMsg } = rateLimit
if (limitMax && limitDuration && limitMsg) await addRateLimiter(chatFlow.id, limitDuration, limitMax, limitMsg)
}
export async function initializeRateLimiter(chatFlowPool: IChatFlow[]) {
- await chatFlowPool.map(async (chatFlow) => {
- await createRateLimiter(chatFlow)
- })
+ await Promise.all(
+ chatFlowPool.map(async (chatFlow) => {
+ await createRateLimiter(chatFlow)
+ })
+ )
}
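The `initializeRateLimiter` change fixes a real bug rather than style: `Array.prototype.map` returns an array of promises, and `await` applied to a plain array resolves immediately, so the old code could return before any limiter was registered. Wrapping the array in `Promise.all` actually waits for all of them. A self-contained demonstration of the difference:

```typescript
// Demonstrates the bug the Promise.all change fixes. `broken` resolves
// immediately because awaiting an array is a no-op; `fixed` resolves only
// after every registration has completed.
const register = (id: string): Promise<void> =>
    new Promise<void>((resolve) =>
        setTimeout(() => {
            console.log(`registered ${id}`)
            resolve()
        }, 10)
    )

async function broken(ids: string[]): Promise<void> {
    await ids.map(register) // returns before any console.log fires
}

async function fixed(ids: string[]): Promise<void> {
    await Promise.all(ids.map(register)) // waits for all registrations
}

broken(['a']).then(() => console.log('broken: done (too early)'))
fixed(['b', 'c']).then(() => console.log('fixed: done after both'))
```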
diff --git a/packages/ui/package.json b/packages/ui/package.json
index f0101d2ce..239cc3ceb 100644
--- a/packages/ui/package.json
+++ b/packages/ui/package.json
@@ -1,6 +1,6 @@
{
"name": "flowise-ui",
- "version": "1.3.4",
+ "version": "1.3.5",
"license": "SEE LICENSE IN LICENSE.md",
"homepage": "https://flowiseai.com",
"author": {
diff --git a/packages/ui/src/views/chatflows/Configuration.js b/packages/ui/src/views/chatflows/Configuration.js
index 51b8d61c8..d569020b4 100644
--- a/packages/ui/src/views/chatflows/Configuration.js
+++ b/packages/ui/src/views/chatflows/Configuration.js
@@ -136,7 +136,7 @@ const Configuration = () => {
- Rate Limit Setup Guide to set up Rate Limit correctly in your hosting environment.'
+ 'Visit Rate Limit Setup Guide to set up Rate Limit correctly in your hosting environment.'
}
/>
diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js
index 7a15d9ff2..3e9675414 100644
--- a/packages/ui/src/views/chatmessage/ChatMessage.js
+++ b/packages/ui/src/views/chatmessage/ChatMessage.js
@@ -64,27 +64,10 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
window.open(data, '_blank')
}
- const handleVectaraMetadata = (message) => {
- if (message.sourceDocuments && message.sourceDocuments[0].metadata.length)
- message.sourceDocuments = message.sourceDocuments.map((docs) => {
- const newMetadata = docs.metadata.reduce((newMetadata, metadata) => {
- newMetadata[metadata.name] = metadata.value
- return newMetadata
- }, {})
- return {
- pageContent: docs.pageContent,
- metadata: newMetadata
- }
- })
- return message
- }
-
const removeDuplicateURL = (message) => {
const visitedURLs = []
const newSourceDocuments = []
- message = handleVectaraMetadata(message)
-
message.sourceDocuments.forEach((source) => {
if (isValidURL(source.metadata.source) && !visitedURLs.includes(source.metadata.source)) {
visitedURLs.push(source.metadata.source)
@@ -174,8 +157,6 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
if (response.data) {
let data = response.data
- data = handleVectaraMetadata(data)
-
if (typeof data === 'object' && data.text && data.sourceDocuments) {
if (!isChatFlowAvailableToStream) {
setMessages((prevMessages) => [
diff --git a/packages/ui/src/views/marketplaces/index.js b/packages/ui/src/views/marketplaces/index.js
index a78361616..49c692db3 100644
--- a/packages/ui/src/views/marketplaces/index.js
+++ b/packages/ui/src/views/marketplaces/index.js
@@ -175,19 +175,8 @@ const Marketplace = () => {
)}
))}
- {!isChatflowsLoading && (!getAllChatflowsMarketplacesApi.data || getAllChatflowsMarketplacesApi.data.length === 0) && (
-
-
-
-
- No Marketplace Yet
-
- )}
- {!isToolsLoading && (!getAllToolsMarketplacesApi.data || getAllToolsMarketplacesApi.data.length === 0) && (
+ {((!isChatflowsLoading && (!getAllChatflowsMarketplacesApi.data || getAllChatflowsMarketplacesApi.data.length === 0)) ||
+ (!isToolsLoading && (!getAllToolsMarketplacesApi.data || getAllToolsMarketplacesApi.data.length === 0))) && (