Merge branch 'main' into FEATURE/elasticsearch-vectorstore
@@ -1,6 +1,6 @@
 {
     "name": "flowise",
-    "version": "1.3.6",
+    "version": "1.3.7",
     "private": true,
     "homepage": "https://flowiseai.com",
    "workspaces": [

@@ -0,0 +1,47 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class AWSApi implements INodeCredential {
+    label: string
+    name: string
+    version: number
+    description: string
+    optional: boolean
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'AWS security credentials'
+        this.name = 'awsApi'
+        this.version = 1.0
+        this.description =
+            'Your <a target="_blank" href="https://docs.aws.amazon.com/IAM/latest/UserGuide/security-creds.html">AWS security credentials</a>. When unspecified, credentials will be sourced from the runtime environment according to the default AWS SDK behavior.'
+        this.optional = true
+        this.inputs = [
+            {
+                label: 'AWS Access Key',
+                name: 'awsKey',
+                type: 'string',
+                placeholder: '<AWS_ACCESS_KEY_ID>',
+                description: 'The access key for your AWS account.',
+                optional: true
+            },
+            {
+                label: 'AWS Secret Access Key',
+                name: 'awsSecret',
+                type: 'password',
+                placeholder: '<AWS_SECRET_ACCESS_KEY>',
+                description: 'The secret key for your AWS account.',
+                optional: true
+            },
+            {
+                label: 'AWS Session Key',
+                name: 'awsSession',
+                type: 'password',
+                placeholder: '<AWS_SESSION_TOKEN>',
+                description: 'The session key for your AWS account. This is only needed when you are using temporary credentials.',
+                optional: true
+            }
+        ]
+    }
+}
+
+module.exports = { credClass: AWSApi }

@@ -0,0 +1,36 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class MomentoCacheApi implements INodeCredential {
+    label: string
+    name: string
+    version: number
+    description: string
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'Momento Cache API'
+        this.name = 'momentoCacheApi'
+        this.version = 1.0
+        this.description =
+            'Refer to <a target="_blank" href="https://docs.momentohq.com/cache/develop/authentication/api-keys">official guide</a> on how to get API key on Momento'
+        this.inputs = [
+            {
+                label: 'Cache',
+                name: 'momentoCache',
+                type: 'string'
+            },
+            {
+                label: 'API Key',
+                name: 'momentoApiKey',
+                type: 'password'
+            },
+            {
+                label: 'Endpoint',
+                name: 'momentoEndpoint',
+                type: 'string'
+            }
+        ]
+    }
+}
+
+module.exports = { credClass: MomentoCacheApi }

@@ -0,0 +1,43 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class RedisCacheApi implements INodeCredential {
+    label: string
+    name: string
+    version: number
+    description: string
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'Redis Cache API'
+        this.name = 'redisCacheApi'
+        this.version = 1.0
+        this.inputs = [
+            {
+                label: 'Redis Host',
+                name: 'redisCacheHost',
+                type: 'string',
+                default: '127.0.0.1'
+            },
+            {
+                label: 'Port',
+                name: 'redisCachePort',
+                type: 'number',
+                default: '6789'
+            },
+            {
+                label: 'User',
+                name: 'redisCacheUser',
+                type: 'string',
+                placeholder: '<REDIS_USERNAME>'
+            },
+            {
+                label: 'Password',
+                name: 'redisCachePwd',
+                type: 'password',
+                placeholder: '<REDIS_PASSWORD>'
+            }
+        ]
+    }
+}
+
+module.exports = { credClass: RedisCacheApi }

@@ -0,0 +1,28 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class UpstashRedisApi implements INodeCredential {
+    label: string
+    name: string
+    version: number
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'Upstash Redis API'
+        this.name = 'upstashRedisApi'
+        this.version = 1.0
+        this.inputs = [
+            {
+                label: 'Upstash Redis REST URL',
+                name: 'upstashConnectionUrl',
+                type: 'string'
+            },
+            {
+                label: 'Token',
+                name: 'upstashConnectionToken',
+                type: 'password'
+            }
+        ]
+    }
+}
+
+module.exports = { credClass: UpstashRedisApi }

@@ -0,0 +1,26 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class UpstashRedisMemoryApi implements INodeCredential {
+    label: string
+    name: string
+    version: number
+    description: string
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'Upstash Redis Memory API'
+        this.name = 'upstashRedisMemoryApi'
+        this.version = 1.0
+        this.description =
+            'Refer to <a target="_blank" href="https://upstash.com/docs/redis/overall/getstarted">official guide</a> on how to create redis instance and get redis REST Token'
+        this.inputs = [
+            {
+                label: 'Upstash Redis REST Token',
+                name: 'upstashRestToken',
+                type: 'password'
+            }
+        ]
+    }
+}
+
+module.exports = { credClass: UpstashRedisMemoryApi }

@@ -95,8 +95,12 @@ class ConversationalAgent_Agents implements INode {
         const callbacks = await additionalCallbacks(nodeData, options)

         if (options && options.chatHistory) {
-            memory.chatHistory = mapChatHistory(options)
-            executor.memory = memory
+            const chatHistoryClassName = memory.chatHistory.constructor.name
+            // Only replace when its In-Memory
+            if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
+                memory.chatHistory = mapChatHistory(options)
+                executor.memory = memory
+            }
         }

         const result = await executor.call({ input }, [...callbacks])

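Note: the guard introduced in this hunk (and repeated in the agent and chain hunks below) makes the UI-supplied chat history replace memory only when that memory is backed by LangChain's in-memory ChatMessageHistory, so externally persisted histories are left alone. A minimal sketch of the check, assuming a LangChain-style memory object:

    import { ChatMessageHistory } from 'langchain/memory'

    // True only for the in-memory default, which is safe to overwrite with the
    // history mapped from options; Redis- or DynamoDB-backed histories report
    // their own constructor names and are left untouched.
    function isInMemoryHistory(history: object): boolean {
        return history.constructor.name === 'ChatMessageHistory'
    }

    console.log(isInMemoryHistory(new ChatMessageHistory())) // true
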
@@ -82,7 +82,11 @@ class ConversationalRetrievalAgent_Agents implements INode {
         if (executor.memory) {
             ;(executor.memory as any).memoryKey = 'chat_history'
             ;(executor.memory as any).outputKey = 'output'
-            ;(executor.memory as any).chatHistory = mapChatHistory(options)
+            const chatHistoryClassName = (executor.memory as any).chatHistory.constructor.name
+            // Only replace when its In-Memory
+            if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
+                ;(executor.memory as any).chatHistory = mapChatHistory(options)
+            }
         }

         const loggerHandler = new ConsoleCallbackHandler(options.logger)

@@ -81,8 +81,12 @@ class OpenAIFunctionAgent_Agents implements INode {
         const memory = nodeData.inputs?.memory as BaseChatMemory

         if (options && options.chatHistory) {
-            memory.chatHistory = mapChatHistory(options)
-            executor.memory = memory
+            const chatHistoryClassName = memory.chatHistory.constructor.name
+            // Only replace when its In-Memory
+            if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
+                memory.chatHistory = mapChatHistory(options)
+                executor.memory = memory
+            }
         }

         const loggerHandler = new ConsoleCallbackHandler(options.logger)

@@ -0,0 +1,57 @@
+import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
+import { MomentoCache as LangchainMomentoCache } from 'langchain/cache/momento'
+import { CacheClient, Configurations, CredentialProvider } from '@gomomento/sdk'
+
+class MomentoCache implements INode {
+    label: string
+    name: string
+    version: number
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+    credential: INodeParams
+
+    constructor() {
+        this.label = 'Momento Cache'
+        this.name = 'momentoCache'
+        this.version = 1.0
+        this.type = 'MomentoCache'
+        this.icon = 'momento.png'
+        this.category = 'Cache'
+        this.baseClasses = [this.type, ...getBaseClasses(LangchainMomentoCache)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            optional: true,
+            credentialNames: ['momentoCacheApi']
+        }
+        this.inputs = []
+    }
+
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const apiKey = getCredentialParam('momentoApiKey', credentialData, nodeData)
+        const cacheName = getCredentialParam('momentoCache', credentialData, nodeData)
+
+        // See https://github.com/momentohq/client-sdk-javascript for connection options
+        const client = new CacheClient({
+            configuration: Configurations.Laptop.v1(),
+            credentialProvider: CredentialProvider.fromString({
+                apiKey: apiKey
+            }),
+            defaultTtlSeconds: 60 * 60 * 24
+        })
+
+        let momentoCache = await LangchainMomentoCache.fromProps({
+            client,
+            cacheName: cacheName
+        })
+        return momentoCache
+    }
+}
+
+module.exports = { nodeClass: MomentoCache }

[binary image added: 5.4 KiB]

@@ -0,0 +1,52 @@
+import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
+import { RedisCache as LangchainRedisCache } from 'langchain/cache/ioredis'
+import { Redis } from 'ioredis'
+
+class RedisCache implements INode {
+    label: string
+    name: string
+    version: number
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+    credential: INodeParams
+
+    constructor() {
+        this.label = 'Redis Cache'
+        this.name = 'redisCache'
+        this.version = 1.0
+        this.type = 'RedisCache'
+        this.icon = 'redis.svg'
+        this.category = 'Cache'
+        this.baseClasses = [this.type, ...getBaseClasses(LangchainRedisCache)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            optional: true,
+            credentialNames: ['redisCacheApi']
+        }
+        this.inputs = []
+    }
+
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const username = getCredentialParam('redisCacheUser', credentialData, nodeData)
+        const password = getCredentialParam('redisCachePwd', credentialData, nodeData)
+        const portStr = getCredentialParam('redisCachePort', credentialData, nodeData)
+        const host = getCredentialParam('redisCacheHost', credentialData, nodeData)
+
+        const client = new Redis({
+            port: portStr ? parseInt(portStr) : 6379,
+            host,
+            username,
+            password
+        })
+        return new LangchainRedisCache(client)
+    }
+}
+
+module.exports = { nodeClass: RedisCache }

@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 128 128" id="redis"><path fill="#A41E11" d="M121.8 93.1c-6.7 3.5-41.4 17.7-48.8 21.6-7.4 3.9-11.5 3.8-17.3 1s-42.7-17.6-49.4-20.8c-3.3-1.6-5-2.9-5-4.2v-12.7s48-10.5 55.8-13.2c7.8-2.8 10.4-2.9 17-.5s46.1 9.5 52.6 11.9v12.5c0 1.3-1.5 2.7-4.9 4.4z"></path><path fill="#D82C20" d="M121.8 80.5c-6.7 3.5-41.4 17.7-48.8 21.6-7.4 3.9-11.5 3.8-17.3 1-5.8-2.8-42.7-17.7-49.4-20.9-6.6-3.2-6.8-5.4-.3-7.9 6.5-2.6 43.2-17 51-19.7 7.8-2.8 10.4-2.9 17-.5s41.1 16.1 47.6 18.5c6.7 2.4 6.9 4.4.2 7.9z"></path><path fill="#A41E11" d="M121.8 72.5c-6.7 3.5-41.4 17.7-48.8 21.6-7.4 3.8-11.5 3.8-17.3 1-5.8-2.8-42.7-17.7-49.4-20.9-3.3-1.6-5-2.9-5-4.2v-12.7s48-10.5 55.8-13.2c7.8-2.8 10.4-2.9 17-.5s46.1 9.5 52.6 11.9v12.5c0 1.3-1.5 2.7-4.9 4.5z"></path><path fill="#D82C20" d="M121.8 59.8c-6.7 3.5-41.4 17.7-48.8 21.6-7.4 3.8-11.5 3.8-17.3 1-5.8-2.8-42.7-17.7-49.4-20.9s-6.8-5.4-.3-7.9c6.5-2.6 43.2-17 51-19.7 7.8-2.8 10.4-2.9 17-.5s41.1 16.1 47.6 18.5c6.7 2.4 6.9 4.4.2 7.9z"></path><path fill="#A41E11" d="M121.8 51c-6.7 3.5-41.4 17.7-48.8 21.6-7.4 3.8-11.5 3.8-17.3 1-5.8-2.7-42.7-17.6-49.4-20.8-3.3-1.6-5.1-2.9-5.1-4.2v-12.7s48-10.5 55.8-13.2c7.8-2.8 10.4-2.9 17-.5s46.1 9.5 52.6 11.9v12.5c.1 1.3-1.4 2.6-4.8 4.4z"></path><path fill="#D82C20" d="M121.8 38.3c-6.7 3.5-41.4 17.7-48.8 21.6-7.4 3.8-11.5 3.8-17.3 1s-42.7-17.6-49.4-20.8-6.8-5.4-.3-7.9c6.5-2.6 43.2-17 51-19.7 7.8-2.8 10.4-2.9 17-.5s41.1 16.1 47.6 18.5c6.7 2.4 6.9 4.4.2 7.8z"></path><path fill="#fff" d="M80.4 26.1l-10.8 1.2-2.5 5.8-3.9-6.5-12.5-1.1 9.3-3.4-2.8-5.2 8.8 3.4 8.2-2.7-2.2 5.4zM66.5 54.5l-20.3-8.4 29.1-4.4z"></path><ellipse cx="38.4" cy="35.4" fill="#fff" rx="15.5" ry="6"></ellipse><path fill="#7A0C00" d="M93.3 27.7l17.2 6.8-17.2 6.8z"></path><path fill="#AD2115" d="M74.3 35.3l19-7.6v13.6l-1.9.8z"></path></svg>

[binary image added: 1.8 KiB]

@@ -0,0 +1,49 @@
+import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
+import { UpstashRedisCache as LangchainUpstashRedisCache } from 'langchain/cache/upstash_redis'
+
+class UpstashRedisCache implements INode {
+    label: string
+    name: string
+    version: number
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+    credential: INodeParams
+
+    constructor() {
+        this.label = 'Upstash Redis Cache'
+        this.name = 'upstashRedisCache'
+        this.version = 1.0
+        this.type = 'UpstashRedisCache'
+        this.icon = 'upstash.png'
+        this.category = 'Cache'
+        this.baseClasses = [this.type, ...getBaseClasses(LangchainUpstashRedisCache)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            optional: true,
+            credentialNames: ['upstashRedisApi']
+        }
+        this.inputs = []
+    }
+
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const upstashConnectionUrl = getCredentialParam('upstashConnectionUrl', credentialData, nodeData)
+        const upstashToken = getCredentialParam('upstashConnectionToken', credentialData, nodeData)
+
+        const cache = new LangchainUpstashRedisCache({
+            config: {
+                url: upstashConnectionUrl,
+                token: upstashToken
+            }
+        })
+        return cache
+    }
+}
+
+module.exports = { nodeClass: UpstashRedisCache }

[binary image added: 2.6 KiB]

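Note: each Cache node above resolves to a LangChain BaseCache instance, which the chat-model nodes below accept through their new optional 'Cache' input and wire up via `if (cache) obj.cache = cache`. A minimal sketch of the effect, using LangChain's built-in in-memory cache for illustration:

    import { ChatOpenAI } from 'langchain/chat_models/openai'
    import { InMemoryCache } from 'langchain/cache'

    // With a cache attached, a repeated identical prompt is answered from the
    // cache instead of triggering a second provider call.
    const model = new ChatOpenAI({ temperature: 0, cache: new InMemoryCache() })
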
@@ -106,8 +106,12 @@ class ConversationChain_Chains implements INode {
         const memory = nodeData.inputs?.memory as BufferMemory

         if (options && options.chatHistory) {
-            memory.chatHistory = mapChatHistory(options)
-            chain.memory = memory
+            const chatHistoryClassName = memory.chatHistory.constructor.name
+            // Only replace when its In-Memory
+            if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
+                memory.chatHistory = mapChatHistory(options)
+                chain.memory = memory
+            }
         }

         const loggerHandler = new ConsoleCallbackHandler(options.logger)

@@ -179,7 +179,11 @@ class ConversationalRetrievalQAChain_Chains implements INode {
         const obj = { question: input }

         if (options && options.chatHistory && chain.memory) {
-            ;(chain.memory as any).chatHistory = mapChatHistory(options)
+            const chatHistoryClassName = (chain.memory as any).chatHistory.constructor.name
+            // Only replace when its In-Memory
+            if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
+                ;(chain.memory as any).chatHistory = mapChatHistory(options)
+            }
         }

         const loggerHandler = new ConsoleCallbackHandler(options.logger)

@@ -1,5 +1,5 @@
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { SqlDatabaseChain, SqlDatabaseChainInput } from 'langchain/chains/sql_db'
+import { SqlDatabaseChain, SqlDatabaseChainInput, DEFAULT_SQL_DATABASE_PROMPT } from 'langchain/chains/sql_db'
 import { getBaseClasses, getInputVariables } from '../../../src/utils'
 import { DataSource } from 'typeorm'
 import { SqlDatabase } from 'langchain/sql_db'

@@ -10,25 +10,6 @@ import { DataSourceOptions } from 'typeorm/data-source'

 type DatabaseType = 'sqlite' | 'postgres' | 'mssql' | 'mysql'

-const defaultPrompt = `Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.
-
-Never query for all the columns from a specific table, only ask for a the few relevant columns given the question.
-
-Pay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
-
-Use the following format:
-
-Question: "Question here"
-SQLQuery: "SQL Query to run"
-SQLResult: "Result of the SQLQuery"
-Answer: "Final answer here"
-
-Only use the tables listed below.
-
-{table_info}
-
-Question: {input}`
-
 class SqlDatabaseChain_Chains implements INode {
     label: string
     name: string

@@ -43,7 +24,7 @@ class SqlDatabaseChain_Chains implements INode {
     constructor() {
         this.label = 'Sql Database Chain'
         this.name = 'sqlDatabaseChain'
-        this.version = 3.0
+        this.version = 4.0
         this.type = 'SqlDatabaseChain'
         this.icon = 'sqlchain.svg'
         this.category = 'Chains'

@@ -89,7 +70,8 @@ class SqlDatabaseChain_Chains implements INode {
                 label: 'Include Tables',
                 name: 'includesTables',
                 type: 'string',
-                description: 'Tables to include for queries.',
+                description: 'Tables to include for queries, seperated by comma. Can only use Include Tables or Ignore Tables',
+                placeholder: 'table1, table2',
                 additionalParams: true,
                 optional: true
             },

@@ -97,7 +79,8 @@ class SqlDatabaseChain_Chains implements INode {
                 label: 'Ignore Tables',
                 name: 'ignoreTables',
                 type: 'string',
-                description: 'Tables to ignore for queries.',
+                description: 'Tables to ignore for queries, seperated by comma. Can only use Ignore Tables or Include Tables',
+                placeholder: 'table1, table2',
                 additionalParams: true,
                 optional: true
             },

@@ -129,7 +112,7 @@ class SqlDatabaseChain_Chains implements INode {
                 warning:
                     'Prompt must include 3 input variables: {input}, {dialect}, {table_info}. You can refer to official guide from description above',
                 rows: 4,
-                placeholder: defaultPrompt,
+                placeholder: DEFAULT_SQL_DATABASE_PROMPT.template + DEFAULT_SQL_DATABASE_PROMPT.templateFormat,
                 additionalParams: true,
                 optional: true
             }

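Note: the placeholder now reuses langchain's exported default instead of the deleted local constant. A short sketch of what it resolves to, assuming the langchain version pinned by this branch:

    import { DEFAULT_SQL_DATABASE_PROMPT } from 'langchain/chains/sql_db'

    // A PromptTemplate over the same {input}, {dialect}, {top_k} and {table_info}
    // variables that the removed defaultPrompt used.
    console.log(DEFAULT_SQL_DATABASE_PROMPT.inputVariables)
    console.log(DEFAULT_SQL_DATABASE_PROMPT.template)
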
@@ -0,0 +1,176 @@
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { ChatBedrock } from 'langchain/chat_models/bedrock'
+import { BaseBedrockInput } from 'langchain/dist/util/bedrock'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'
+
+/**
+ * I had to run the following to build the component
+ * and get the icon copied over to the dist directory
+ * Flowise/packages/components > yarn build
+ *
+ * @author Michael Connor <mlconnor@yahoo.com>
+ */
+class AWSChatBedrock_ChatModels implements INode {
+    label: string
+    name: string
+    version: number
+    type: string
+    icon: string
+    category: string
+    description: string
+    baseClasses: string[]
+    credential: INodeParams
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'AWS Bedrock'
+        this.name = 'awsChatBedrock'
+        this.version = 2.0
+        this.type = 'AWSChatBedrock'
+        this.icon = 'awsBedrock.png'
+        this.category = 'Chat Models'
+        this.description = 'Wrapper around AWS Bedrock large language models'
+        this.baseClasses = [this.type, ...getBaseClasses(ChatBedrock)]
+        this.credential = {
+            label: 'AWS Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['awsApi'],
+            optional: true
+        }
+        this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
+            {
+                label: 'Region',
+                name: 'region',
+                type: 'options',
+                options: [
+                    { label: 'af-south-1', name: 'af-south-1' },
+                    { label: 'ap-east-1', name: 'ap-east-1' },
+                    { label: 'ap-northeast-1', name: 'ap-northeast-1' },
+                    { label: 'ap-northeast-2', name: 'ap-northeast-2' },
+                    { label: 'ap-northeast-3', name: 'ap-northeast-3' },
+                    { label: 'ap-south-1', name: 'ap-south-1' },
+                    { label: 'ap-south-2', name: 'ap-south-2' },
+                    { label: 'ap-southeast-1', name: 'ap-southeast-1' },
+                    { label: 'ap-southeast-2', name: 'ap-southeast-2' },
+                    { label: 'ap-southeast-3', name: 'ap-southeast-3' },
+                    { label: 'ap-southeast-4', name: 'ap-southeast-4' },
+                    { label: 'ap-southeast-5', name: 'ap-southeast-5' },
+                    { label: 'ap-southeast-6', name: 'ap-southeast-6' },
+                    { label: 'ca-central-1', name: 'ca-central-1' },
+                    { label: 'ca-west-1', name: 'ca-west-1' },
+                    { label: 'cn-north-1', name: 'cn-north-1' },
+                    { label: 'cn-northwest-1', name: 'cn-northwest-1' },
+                    { label: 'eu-central-1', name: 'eu-central-1' },
+                    { label: 'eu-central-2', name: 'eu-central-2' },
+                    { label: 'eu-north-1', name: 'eu-north-1' },
+                    { label: 'eu-south-1', name: 'eu-south-1' },
+                    { label: 'eu-south-2', name: 'eu-south-2' },
+                    { label: 'eu-west-1', name: 'eu-west-1' },
+                    { label: 'eu-west-2', name: 'eu-west-2' },
+                    { label: 'eu-west-3', name: 'eu-west-3' },
+                    { label: 'il-central-1', name: 'il-central-1' },
+                    { label: 'me-central-1', name: 'me-central-1' },
+                    { label: 'me-south-1', name: 'me-south-1' },
+                    { label: 'sa-east-1', name: 'sa-east-1' },
+                    { label: 'us-east-1', name: 'us-east-1' },
+                    { label: 'us-east-2', name: 'us-east-2' },
+                    { label: 'us-gov-east-1', name: 'us-gov-east-1' },
+                    { label: 'us-gov-west-1', name: 'us-gov-west-1' },
+                    { label: 'us-west-1', name: 'us-west-1' },
+                    { label: 'us-west-2', name: 'us-west-2' }
+                ],
+                default: 'us-east-1',
+                optional: false
+            },
+            {
+                label: 'Model Name',
+                name: 'model',
+                type: 'options',
+                options: [
+                    { label: 'amazon.titan-tg1-large', name: 'amazon.titan-tg1-large' },
+                    { label: 'amazon.titan-e1t-medium', name: 'amazon.titan-e1t-medium' },
+                    { label: 'stability.stable-diffusion-xl', name: 'stability.stable-diffusion-xl' },
+                    { label: 'ai21.j2-grande-instruct', name: 'ai21.j2-grande-instruct' },
+                    { label: 'ai21.j2-jumbo-instruct', name: 'ai21.j2-jumbo-instruct' },
+                    { label: 'ai21.j2-mid', name: 'ai21.j2-mid' },
+                    { label: 'ai21.j2-ultra', name: 'ai21.j2-ultra' },
+                    { label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
+                    { label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
+                    { label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' }
+                ],
+                default: 'anthropic.claude-v2',
+                optional: false
+            },
+            {
+                label: 'Temperature',
+                name: 'temperature',
+                type: 'number',
+                step: 0.1,
+                description: 'Temperature parameter may not apply to certain model. Please check available model parameters',
+                optional: true,
+                default: 0.7,
+                additionalParams: false
+            },
+            {
+                label: 'Max Tokens to Sample',
+                name: 'max_tokens_to_sample',
+                type: 'number',
+                step: 10,
+                description: 'Max Tokens parameter may not apply to certain model. Please check available model parameters',
+                optional: false,
+                default: 200,
+                additionalParams: false
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const iRegion = nodeData.inputs?.region as string
+        const iModel = nodeData.inputs?.model as string
+        const iTemperature = nodeData.inputs?.temperature as string
+        const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
+        const cache = nodeData.inputs?.cache as BaseCache
+
+        const obj: BaseBedrockInput & BaseLLMParams = {
+            region: iRegion,
+            model: iModel,
+            maxTokens: parseInt(iMax_tokens_to_sample, 10),
+            temperature: parseFloat(iTemperature)
+        }
+
+        /**
+         * Long-term credentials specified in LLM configuration are optional.
+         * Bedrock's credential provider falls back to the AWS SDK to fetch
+         * credentials from the running environment.
+         * When specified, we override the default provider with configured values.
+         * @see https://github.com/aws/aws-sdk-js-v3/blob/main/packages/credential-provider-node/README.md
+         */
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        if (credentialData && Object.keys(credentialData).length !== 0) {
+            const credentialApiKey = getCredentialParam('awsKey', credentialData, nodeData)
+            const credentialApiSecret = getCredentialParam('awsSecret', credentialData, nodeData)
+            const credentialApiSession = getCredentialParam('awsSession', credentialData, nodeData)
+
+            obj.credentials = {
+                accessKeyId: credentialApiKey,
+                secretAccessKey: credentialApiSecret,
+                sessionToken: credentialApiSession
+            }
+        }
+        if (cache) obj.cache = cache
+
+        const amazonBedrock = new ChatBedrock(obj)
+        return amazonBedrock
+    }
+}
+
+module.exports = { nodeClass: AWSChatBedrock_ChatModels }

[binary image added: 61 KiB]

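Note: because the AWS credential is optional, this node supports two credential paths. A minimal sketch of both, reusing the inputs defined in this file:

    import { ChatBedrock } from 'langchain/chat_models/bedrock'

    // No credential attached: the AWS SDK default provider chain resolves keys
    // from env vars, shared config files, or an attached IAM role.
    const implicit = new ChatBedrock({ region: 'us-east-1', model: 'anthropic.claude-v2' })

    // Credential attached in Flowise: explicit keys override the provider chain.
    const explicit = new ChatBedrock({
        region: 'us-east-1',
        model: 'anthropic.claude-v2',
        credentials: { accessKeyId: '<AWS_ACCESS_KEY_ID>', secretAccessKey: '<AWS_SECRET_ACCESS_KEY>' }
    })
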
@@ -2,6 +2,8 @@ import { OpenAIBaseInput } from 'langchain/dist/types/openai-types'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { AzureOpenAIInput, ChatOpenAI } from 'langchain/chat_models/openai'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'

 class AzureChatOpenAI_ChatModels implements INode {
     label: string

@@ -18,7 +20,7 @@ class AzureChatOpenAI_ChatModels implements INode {
     constructor() {
         this.label = 'Azure ChatOpenAI'
         this.name = 'azureChatOpenAI'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AzureChatOpenAI'
         this.icon = 'Azure.svg'
         this.category = 'Chat Models'

@@ -31,6 +33,12 @@ class AzureChatOpenAI_ChatModels implements INode {
             credentialNames: ['azureOpenAIApi']
         }
         this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
             {
                 label: 'Model Name',
                 name: 'modelName',

@@ -107,6 +115,7 @@ class AzureChatOpenAI_ChatModels implements INode {
         const presencePenalty = nodeData.inputs?.presencePenalty as string
         const timeout = nodeData.inputs?.timeout as string
         const streaming = nodeData.inputs?.streaming as boolean
+        const cache = nodeData.inputs?.cache as BaseCache

         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)

@@ -114,7 +123,7 @@ class AzureChatOpenAI_ChatModels implements INode {
         const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
         const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)

-        const obj: Partial<AzureOpenAIInput> & Partial<OpenAIBaseInput> = {
+        const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIBaseInput> = {
             temperature: parseFloat(temperature),
             modelName,
             azureOpenAIApiKey,

@@ -128,6 +137,7 @@ class AzureChatOpenAI_ChatModels implements INode {
         if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
         if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
         if (timeout) obj.timeout = parseInt(timeout, 10)
+        if (cache) obj.cache = cache

         const model = new ChatOpenAI(obj)
         return model

@@ -0,0 +1,57 @@
+import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses } from '../../../src/utils'
+import { NIBittensorChatModel, BittensorInput } from 'langchain/experimental/chat_models/bittensor'
+import { BaseCache } from 'langchain/schema'
+
+class Bittensor_ChatModels implements INode {
+    label: string
+    name: string
+    version: number
+    type: string
+    icon: string
+    category: string
+    description: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'NIBittensorChat'
+        this.name = 'NIBittensorChatModel'
+        this.version = 2.0
+        this.type = 'BittensorChat'
+        this.icon = 'logo.png'
+        this.category = 'Chat Models'
+        this.description = 'Wrapper around Bittensor subnet 1 large language models'
+        this.baseClasses = [this.type, ...getBaseClasses(NIBittensorChatModel)]
+        this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
+            {
+                label: 'System prompt',
+                name: 'system_prompt',
+                type: 'string',
+                additionalParams: true,
+                optional: true
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData, _: string): Promise<any> {
+        const system_prompt = nodeData.inputs?.system_prompt as string
+        const cache = nodeData.inputs?.cache as BaseCache
+
+        const obj: Partial<BittensorInput> = {
+            systemPrompt: system_prompt
+        }
+        if (cache) obj.cache = cache
+
+        const model = new NIBittensorChatModel(obj)
+        return model
+    }
+}
+
+module.exports = { nodeClass: Bittensor_ChatModels }

[binary image added: 24 KiB]

@@ -1,6 +1,8 @@
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { AnthropicInput, ChatAnthropic } from 'langchain/chat_models/anthropic'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'

 class ChatAnthropic_ChatModels implements INode {
     label: string

@@ -17,7 +19,7 @@ class ChatAnthropic_ChatModels implements INode {
     constructor() {
         this.label = 'ChatAnthropic'
         this.name = 'chatAnthropic'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'ChatAnthropic'
         this.icon = 'chatAnthropic.png'
         this.category = 'Chat Models'

@@ -30,6 +32,12 @@ class ChatAnthropic_ChatModels implements INode {
             credentialNames: ['anthropicApi']
         }
         this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
             {
                 label: 'Model Name',
                 name: 'modelName',

@@ -135,11 +143,12 @@ class ChatAnthropic_ChatModels implements INode {
         const topP = nodeData.inputs?.topP as string
         const topK = nodeData.inputs?.topK as string
         const streaming = nodeData.inputs?.streaming as boolean
+        const cache = nodeData.inputs?.cache as BaseCache

         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData)

-        const obj: Partial<AnthropicInput> & { anthropicApiKey?: string } = {
+        const obj: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string } = {
             temperature: parseFloat(temperature),
             modelName,
             anthropicApiKey,

@@ -149,6 +158,7 @@ class ChatAnthropic_ChatModels implements INode {
         if (maxTokensToSample) obj.maxTokensToSample = parseInt(maxTokensToSample, 10)
         if (topP) obj.topP = parseFloat(topP)
         if (topK) obj.topK = parseFloat(topK)
+        if (cache) obj.cache = cache

         const model = new ChatAnthropic(obj)
         return model

@@ -1,6 +1,7 @@
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { ChatGooglePaLM, GooglePaLMChatInput } from 'langchain/chat_models/googlepalm'
+import { BaseCache } from 'langchain/schema'

 class ChatGooglePaLM_ChatModels implements INode {
     label: string

@@ -17,7 +18,7 @@ class ChatGooglePaLM_ChatModels implements INode {
     constructor() {
         this.label = 'ChatGooglePaLM'
         this.name = 'chatGooglePaLM'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'ChatGooglePaLM'
         this.icon = 'Google_PaLM_Logo.svg'
         this.category = 'Chat Models'

@@ -30,6 +31,12 @@ class ChatGooglePaLM_ChatModels implements INode {
             credentialNames: ['googleMakerSuite']
         }
         this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
             {
                 label: 'Model Name',
                 name: 'modelName',

@@ -96,6 +103,7 @@ class ChatGooglePaLM_ChatModels implements INode {
         const temperature = nodeData.inputs?.temperature as string
         const topP = nodeData.inputs?.topP as string
         const topK = nodeData.inputs?.topK as string
+        const cache = nodeData.inputs?.cache as BaseCache

         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const googleMakerSuiteKey = getCredentialParam('googleMakerSuiteKey', credentialData, nodeData)

@@ -108,6 +116,7 @@ class ChatGooglePaLM_ChatModels implements INode {

         if (topP) obj.topP = parseFloat(topP)
         if (topK) obj.topK = parseFloat(topK)
+        if (cache) obj.cache = cache

         const model = new ChatGooglePaLM(obj)
         return model

@@ -2,6 +2,7 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { ChatGoogleVertexAI, GoogleVertexAIChatInput } from 'langchain/chat_models/googlevertexai'
 import { GoogleAuthOptions } from 'google-auth-library'
+import { BaseCache } from 'langchain/schema'

 class GoogleVertexAI_ChatModels implements INode {
     label: string

@@ -18,7 +19,7 @@ class GoogleVertexAI_ChatModels implements INode {
     constructor() {
         this.label = 'ChatGoogleVertexAI'
        this.name = 'chatGoogleVertexAI'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'ChatGoogleVertexAI'
         this.icon = 'vertexai.svg'
         this.category = 'Chat Models'

@@ -34,6 +35,12 @@ class GoogleVertexAI_ChatModels implements INode {
                 'Google Vertex AI credential. If you are using a GCP service like Cloud Run, or if you have installed default credentials on your local machine, you do not need to set this credential.'
         }
         this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
             {
                 label: 'Model Name',
                 name: 'modelName',

@@ -113,6 +120,7 @@ class GoogleVertexAI_ChatModels implements INode {
         const modelName = nodeData.inputs?.modelName as string
         const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
         const topP = nodeData.inputs?.topP as string
+        const cache = nodeData.inputs?.cache as BaseCache

         const obj: GoogleVertexAIChatInput<GoogleAuthOptions> = {
             temperature: parseFloat(temperature),

@@ -122,6 +130,7 @@ class GoogleVertexAI_ChatModels implements INode {

         if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
         if (topP) obj.topP = parseFloat(topP)
+        if (cache) obj.cache = cache

         const model = new ChatGoogleVertexAI(obj)
         return model

@@ -1,6 +1,7 @@
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { HFInput, HuggingFaceInference } from './core'
+import { BaseCache } from 'langchain/schema'

 class ChatHuggingFace_ChatModels implements INode {
     label: string

@@ -17,7 +18,7 @@ class ChatHuggingFace_ChatModels implements INode {
     constructor() {
         this.label = 'ChatHuggingFace'
         this.name = 'chatHuggingFace'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'ChatHuggingFace'
         this.icon = 'huggingface.png'
         this.category = 'Chat Models'

@@ -30,6 +31,12 @@ class ChatHuggingFace_ChatModels implements INode {
             credentialNames: ['huggingFaceApi']
         }
         this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
             {
                 label: 'Model',
                 name: 'model',

@@ -102,6 +109,7 @@ class ChatHuggingFace_ChatModels implements INode {
         const hfTopK = nodeData.inputs?.hfTopK as string
         const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
         const endpoint = nodeData.inputs?.endpoint as string
+        const cache = nodeData.inputs?.cache as BaseCache

         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const huggingFaceApiKey = getCredentialParam('huggingFaceApiKey', credentialData, nodeData)

@@ -119,6 +127,7 @@ class ChatHuggingFace_ChatModels implements INode {
         if (endpoint) obj.endpoint = endpoint

         const huggingFace = new HuggingFaceInference(obj)
+        if (cache) huggingFace.cache = cache
         return huggingFace
     }
 }

@@ -2,6 +2,8 @@ import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
 import { OpenAIChat } from 'langchain/llms/openai'
 import { OpenAIChatInput } from 'langchain/chat_models/openai'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'

 class ChatLocalAI_ChatModels implements INode {
     label: string

@@ -17,13 +19,19 @@ class ChatLocalAI_ChatModels implements INode {
     constructor() {
         this.label = 'ChatLocalAI'
         this.name = 'chatLocalAI'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'ChatLocalAI'
         this.icon = 'localai.png'
         this.category = 'Chat Models'
         this.description = 'Use local LLMs like llama.cpp, gpt4all using LocalAI'
         this.baseClasses = [this.type, 'BaseChatModel', ...getBaseClasses(OpenAIChat)]
         this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
             {
                 label: 'Base Path',
                 name: 'basePath',

@@ -78,8 +86,9 @@ class ChatLocalAI_ChatModels implements INode {
         const topP = nodeData.inputs?.topP as string
         const timeout = nodeData.inputs?.timeout as string
         const basePath = nodeData.inputs?.basePath as string
+        const cache = nodeData.inputs?.cache as BaseCache

-        const obj: Partial<OpenAIChatInput> & { openAIApiKey?: string } = {
+        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey: 'sk-'

@@ -88,6 +97,7 @@ class ChatLocalAI_ChatModels implements INode {
         if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
         if (topP) obj.topP = parseFloat(topP)
         if (timeout) obj.timeout = parseInt(timeout, 10)
+        if (cache) obj.cache = cache

         const model = new OpenAIChat(obj, { basePath })

@@ -1,6 +1,8 @@
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'

 class ChatOpenAI_ChatModels implements INode {
     label: string

@@ -17,7 +19,7 @@ class ChatOpenAI_ChatModels implements INode {
     constructor() {
         this.label = 'ChatOpenAI'
         this.name = 'chatOpenAI'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'ChatOpenAI'
         this.icon = 'openai.png'
         this.category = 'Chat Models'

@@ -30,6 +32,12 @@ class ChatOpenAI_ChatModels implements INode {
             credentialNames: ['openAIApi']
         }
         this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
             {
                 label: 'Model Name',
                 name: 'modelName',

@@ -151,7 +159,9 @@ class ChatOpenAI_ChatModels implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIChatInput> & { openAIApiKey?: string } = {
+        const cache = nodeData.inputs?.cache as BaseCache
+
+        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey,

@@ -163,6 +173,7 @@ class ChatOpenAI_ChatModels implements INode {
         if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
         if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
         if (timeout) obj.timeout = parseInt(timeout, 10)
+        if (cache) obj.cache = cache

         let parsedBaseOptions: any | undefined = undefined

@@ -1,6 +1,8 @@
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
+import { BaseCache } from 'langchain/schema'
+import { BaseLLMParams } from 'langchain/llms/base'

 class ChatOpenAICustom_ChatModels implements INode {
     label: string

@@ -17,7 +19,7 @@ class ChatOpenAICustom_ChatModels implements INode {
     constructor() {
         this.label = 'ChatOpenAI Custom'
         this.name = 'chatOpenAICustom'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'ChatOpenAI-Custom'
         this.icon = 'openai.png'
         this.category = 'Chat Models'

@@ -31,6 +33,12 @@ class ChatOpenAICustom_ChatModels implements INode {
             optional: true
         }
         this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
             {
                 label: 'Model Name',
                 name: 'modelName',

@@ -113,11 +121,12 @@ class ChatOpenAICustom_ChatModels implements INode {
         const streaming = nodeData.inputs?.streaming as boolean
         const basePath = nodeData.inputs?.basepath as string
         const baseOptions = nodeData.inputs?.baseOptions
+        const cache = nodeData.inputs?.cache as BaseCache

         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIChatInput> & { openAIApiKey?: string } = {
+        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey,

@@ -129,6 +138,7 @@ class ChatOpenAICustom_ChatModels implements INode {
         if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
         if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
         if (timeout) obj.timeout = parseInt(timeout, 10)
+        if (cache) obj.cache = cache

         let parsedBaseOptions: any | undefined = undefined

@@ -0,0 +1,112 @@
+import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
+import { TextSplitter } from 'langchain/text_splitter'
+import { Document } from 'langchain/document'
+import { handleEscapeCharacters } from '../../../src'
+
+class PlainText_DocumentLoaders implements INode {
+    label: string
+    name: string
+    version: number
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+    outputs: INodeOutputsValue[]
+
+    constructor() {
+        this.label = 'Plain Text'
+        this.name = 'plainText'
+        this.version = 2.0
+        this.type = 'Document'
+        this.icon = 'plaintext.svg'
+        this.category = 'Document Loaders'
+        this.description = `Load data from plain text`
+        this.baseClasses = [this.type]
+        this.inputs = [
+            {
+                label: 'Text',
+                name: 'text',
+                type: 'string',
+                rows: 4,
+                placeholder:
+                    'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua...'
+            },
+            {
+                label: 'Text Splitter',
+                name: 'textSplitter',
+                type: 'TextSplitter',
+                optional: true
+            },
+            {
+                label: 'Metadata',
+                name: 'metadata',
+                type: 'json',
+                optional: true,
+                additionalParams: true
+            }
+        ]
+        this.outputs = [
+            {
+                label: 'Document',
+                name: 'document',
+                baseClasses: this.baseClasses
+            },
+            {
+                label: 'Text',
+                name: 'text',
+                baseClasses: ['string', 'json']
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData): Promise<any> {
+        const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
+        const text = nodeData.inputs?.text as string
+        const metadata = nodeData.inputs?.metadata
+        const output = nodeData.outputs?.output as string
+
+        let alldocs: Document<Record<string, any>>[] = []
+
+        if (textSplitter) {
+            const docs = await textSplitter.createDocuments([text])
+            alldocs.push(...docs)
+        } else {
+            alldocs.push(
+                new Document({
+                    pageContent: text
+                })
+            )
+        }
+
+        let finaldocs: Document<Record<string, any>>[] = []
+        if (metadata) {
+            const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)
+            for (const doc of alldocs) {
+                const newdoc = {
+                    ...doc,
+                    metadata: {
+                        ...doc.metadata,
+                        ...parsedMetadata
+                    }
+                }
+                finaldocs.push(newdoc)
+            }
+        } else {
+            finaldocs = alldocs
+        }
+
+        if (output === 'document') {
+            return finaldocs
+        } else {
+            let finaltext = ''
+            for (const doc of finaldocs) {
+                finaltext += `${doc.pageContent}\n`
+            }
+            return handleEscapeCharacters(finaltext, false)
+        }
+    }
+}
+
+module.exports = { nodeClass: PlainText_DocumentLoaders }

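Note: this node and the updated Text File loader below expose the same two outputs. A rough sketch of the difference, assuming a couple of already-split documents:

    import { Document } from 'langchain/document'

    const docs = [new Document({ pageContent: 'part one' }), new Document({ pageContent: 'part two' })]
    // 'document' output: the Document[] itself, with any custom metadata merged in.
    // 'text' output: pageContent joined with newlines, then run through
    // handleEscapeCharacters before reaching downstream string inputs.
    const text = docs.map((d) => d.pageContent).join('\n') // 'part one\npart two'
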
@@ -0,0 +1,7 @@
+<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-highlight" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
+    <path stroke="none" d="M0 0h24v24H0z" fill="none"></path>
+    <path d="M3 19h4l10.5 -10.5a2.828 2.828 0 1 0 -4 -4l-10.5 10.5v4"></path>
+    <path d="M12.5 5.5l4 4"></path>
+    <path d="M4.5 13.5l4 4"></path>
+    <path d="M21 15v4h-8l4 -4z"></path>
+</svg>

[binary image added: 482 B]

@@ -1,6 +1,8 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
 import { TextSplitter } from 'langchain/text_splitter'
 import { TextLoader } from 'langchain/document_loaders/fs/text'
+import { Document } from 'langchain/document'
+import { handleEscapeCharacters } from '../../../src'

 class Text_DocumentLoaders implements INode {
     label: string

@@ -12,11 +14,12 @@ class Text_DocumentLoaders implements INode {
     category: string
     baseClasses: string[]
     inputs: INodeParams[]
+    outputs: INodeOutputsValue[]

     constructor() {
         this.label = 'Text File'
         this.name = 'textFile'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'Document'
         this.icon = 'textFile.svg'
         this.category = 'Document Loaders'

@@ -43,12 +46,25 @@ class Text_DocumentLoaders implements INode {
                 additionalParams: true
             }
         ]
+        this.outputs = [
+            {
+                label: 'Document',
+                name: 'document',
+                baseClasses: this.baseClasses
+            },
+            {
+                label: 'Text',
+                name: 'text',
+                baseClasses: ['string', 'json']
+            }
+        ]
     }

     async init(nodeData: INodeData): Promise<any> {
         const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
         const txtFileBase64 = nodeData.inputs?.txtFile as string
         const metadata = nodeData.inputs?.metadata
+        const output = nodeData.outputs?.output as string

         let alldocs = []
         let files: string[] = []

@@ -75,9 +91,9 @@ class Text_DocumentLoaders implements INode {
             }
         }

+        let finaldocs: Document<Record<string, any>>[] = []
         if (metadata) {
             const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)
-            let finaldocs = []
             for (const doc of alldocs) {
                 const newdoc = {
                     ...doc,

@@ -88,9 +104,19 @@ class Text_DocumentLoaders implements INode {
                 }
                 finaldocs.push(newdoc)
             }
-            return finaldocs
+        } else {
+            finaldocs = alldocs
         }
-        return alldocs
+
+        if (output === 'document') {
+            return finaldocs
+        } else {
+            let finaltext = ''
+            for (const doc of finaldocs) {
+                finaltext += `${doc.pageContent}\n`
+            }
+            return handleEscapeCharacters(finaltext, false)
+        }
     }
 }

@ -0,0 +1,175 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { Bedrock } from 'langchain/llms/bedrock'
|
||||
import { BaseBedrockInput } from 'langchain/dist/util/bedrock'
|
||||
import { BaseCache } from 'langchain/schema'
import { BaseLLMParams } from 'langchain/llms/base'

/**
* I had to run the following to build the component
* and get the icon copied over to the dist directory
* Flowise/packages/components > yarn build
*
* @author Michael Connor <mlconnor@yahoo.com>
*/
class AWSBedrock_LLMs implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
baseClasses: string[]
credential: INodeParams
inputs: INodeParams[]

constructor() {
this.label = 'AWS Bedrock'
this.name = 'awsBedrock'
this.version = 1.2
this.type = 'AWSBedrock'
this.icon = 'awsBedrock.png'
this.category = 'LLMs'
this.description = 'Wrapper around AWS Bedrock large language models'
this.baseClasses = [this.type, ...getBaseClasses(Bedrock)]
this.credential = {
label: 'AWS Credential',
name: 'credential',
type: 'credential',
credentialNames: ['awsApi'],
optional: true
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Region',
name: 'region',
type: 'options',
options: [
{ label: 'af-south-1', name: 'af-south-1' },
{ label: 'ap-east-1', name: 'ap-east-1' },
{ label: 'ap-northeast-1', name: 'ap-northeast-1' },
{ label: 'ap-northeast-2', name: 'ap-northeast-2' },
{ label: 'ap-northeast-3', name: 'ap-northeast-3' },
{ label: 'ap-south-1', name: 'ap-south-1' },
{ label: 'ap-south-2', name: 'ap-south-2' },
{ label: 'ap-southeast-1', name: 'ap-southeast-1' },
{ label: 'ap-southeast-2', name: 'ap-southeast-2' },
{ label: 'ap-southeast-3', name: 'ap-southeast-3' },
{ label: 'ap-southeast-4', name: 'ap-southeast-4' },
{ label: 'ap-southeast-5', name: 'ap-southeast-5' },
{ label: 'ap-southeast-6', name: 'ap-southeast-6' },
{ label: 'ca-central-1', name: 'ca-central-1' },
{ label: 'ca-west-1', name: 'ca-west-1' },
{ label: 'cn-north-1', name: 'cn-north-1' },
{ label: 'cn-northwest-1', name: 'cn-northwest-1' },
{ label: 'eu-central-1', name: 'eu-central-1' },
{ label: 'eu-central-2', name: 'eu-central-2' },
{ label: 'eu-north-1', name: 'eu-north-1' },
{ label: 'eu-south-1', name: 'eu-south-1' },
{ label: 'eu-south-2', name: 'eu-south-2' },
{ label: 'eu-west-1', name: 'eu-west-1' },
{ label: 'eu-west-2', name: 'eu-west-2' },
{ label: 'eu-west-3', name: 'eu-west-3' },
{ label: 'il-central-1', name: 'il-central-1' },
{ label: 'me-central-1', name: 'me-central-1' },
{ label: 'me-south-1', name: 'me-south-1' },
{ label: 'sa-east-1', name: 'sa-east-1' },
{ label: 'us-east-1', name: 'us-east-1' },
{ label: 'us-east-2', name: 'us-east-2' },
{ label: 'us-gov-east-1', name: 'us-gov-east-1' },
{ label: 'us-gov-west-1', name: 'us-gov-west-1' },
{ label: 'us-west-1', name: 'us-west-1' },
{ label: 'us-west-2', name: 'us-west-2' }
],
default: 'us-east-1',
optional: false
},
{
label: 'Model Name',
name: 'model',
type: 'options',
options: [
{ label: 'amazon.titan-tg1-large', name: 'amazon.titan-tg1-large' },
{ label: 'amazon.titan-e1t-medium', name: 'amazon.titan-e1t-medium' },
{ label: 'stability.stable-diffusion-xl', name: 'stability.stable-diffusion-xl' },
{ label: 'ai21.j2-grande-instruct', name: 'ai21.j2-grande-instruct' },
{ label: 'ai21.j2-jumbo-instruct', name: 'ai21.j2-jumbo-instruct' },
{ label: 'ai21.j2-mid', name: 'ai21.j2-mid' },
{ label: 'ai21.j2-ultra', name: 'ai21.j2-ultra' },
{ label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
{ label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
{ label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' }
],
default: 'anthropic.claude-v2',
optional: false
},
{
label: 'Temperature',
name: 'temperature',
type: 'number',
step: 0.1,
description: 'Temperature parameter may not apply to certain models. Please check available model parameters',
optional: true,
default: 0.7,
additionalParams: false
},
{
label: 'Max Tokens to Sample',
name: 'max_tokens_to_sample',
type: 'number',
step: 10,
description: 'Max Tokens parameter may not apply to certain models. Please check available model parameters',
optional: false,
default: 200,
additionalParams: false
}
]
}

async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const iRegion = nodeData.inputs?.region as string
const iModel = nodeData.inputs?.model as string
const iTemperature = nodeData.inputs?.temperature as string
const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<BaseBedrockInput> & BaseLLMParams = {
model: iModel,
region: iRegion,
temperature: parseFloat(iTemperature),
maxTokens: parseInt(iMax_tokens_to_sample, 10)
}

/**
* Long-term credentials specified in the LLM configuration are optional.
* Bedrock's credential provider falls back to the AWS SDK to fetch
* credentials from the running environment.
* When specified, we override the default provider with the configured values.
* @see https://github.com/aws/aws-sdk-js-v3/blob/main/packages/credential-provider-node/README.md
*/
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
if (credentialData && Object.keys(credentialData).length !== 0) {
const credentialApiKey = getCredentialParam('awsKey', credentialData, nodeData)
const credentialApiSecret = getCredentialParam('awsSecret', credentialData, nodeData)
const credentialApiSession = getCredentialParam('awsSession', credentialData, nodeData)

obj.credentials = {
accessKeyId: credentialApiKey,
secretAccessKey: credentialApiSecret,
sessionToken: credentialApiSession
}
}
if (cache) obj.cache = cache

const amazonBedrock = new Bedrock(obj)
return amazonBedrock
}
}

module.exports = { nodeClass: AWSBedrock_LLMs }
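
To make the credential comment in init() concrete, here is a minimal sketch of the two construction paths. The 'langchain/llms/bedrock' import path and all placeholder values are assumptions for illustration, not part of this diff:

import { Bedrock } from 'langchain/llms/bedrock'

// Credential attached in Flowise: init() passes the keys through explicitly.
const explicit = new Bedrock({
    model: 'anthropic.claude-v2',
    region: 'us-east-1',
    credentials: {
        accessKeyId: '<AWS_ACCESS_KEY_ID>',
        secretAccessKey: '<AWS_SECRET_ACCESS_KEY>'
    }
})

// No credential attached: `credentials` is omitted and the AWS SDK default
// provider chain (env vars, shared config, instance roles) resolves them.
const fromEnvironment = new Bedrock({ model: 'anthropic.claude-v2', region: 'us-east-1' })
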
After Width: | Height: | Size: 61 KiB

@ -1,7 +1,8 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { AzureOpenAIInput, OpenAI, OpenAIInput } from 'langchain/llms/openai'

import { BaseCache } from 'langchain/schema'
import { BaseLLMParams } from 'langchain/llms/base'
class AzureOpenAI_LLMs implements INode {
label: string
name: string

@ -17,7 +18,7 @@ class AzureOpenAI_LLMs implements INode {
constructor() {
this.label = 'Azure OpenAI'
this.name = 'azureOpenAI'
this.version = 1.0
this.version = 2.0
this.type = 'AzureOpenAI'
this.icon = 'Azure.svg'
this.category = 'LLMs'

@ -30,6 +31,12 @@ class AzureOpenAI_LLMs implements INode {
credentialNames: ['azureOpenAIApi']
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Model Name',
name: 'modelName',

@ -163,7 +170,9 @@ class AzureOpenAI_LLMs implements INode {
const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)

const obj: Partial<AzureOpenAIInput> & Partial<OpenAIInput> = {
const cache = nodeData.inputs?.cache as BaseCache

const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIInput> = {
temperature: parseFloat(temperature),
modelName,
azureOpenAIApiKey,

@ -179,6 +188,7 @@ class AzureOpenAI_LLMs implements INode {
if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
if (timeout) obj.timeout = parseInt(timeout, 10)
if (bestOf) obj.bestOf = parseInt(bestOf, 10)
if (cache) obj.cache = cache

const model = new OpenAI(obj)
return model

@ -0,0 +1,68 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { NIBittensorLLM, BittensorInput } from 'langchain/experimental/llms/bittensor'
import { BaseCache } from 'langchain/schema'
import { BaseLLMParams } from 'langchain/llms/base'

class Bittensor_LLMs implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
baseClasses: string[]
inputs: INodeParams[]

constructor() {
this.label = 'NIBittensorLLM'
this.name = 'NIBittensorLLM'
this.version = 2.0
this.type = 'Bittensor'
this.icon = 'logo.png'
this.category = 'LLMs'
this.description = 'Wrapper around Bittensor subnet 1 large language models'
this.baseClasses = [this.type, ...getBaseClasses(NIBittensorLLM)]
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'System prompt',
name: 'system_prompt',
type: 'string',
additionalParams: true,
optional: true
},
{
label: 'Top Responses',
name: 'topResponses',
type: 'number',
step: 1,
optional: true,
additionalParams: true
}
]
}

async init(nodeData: INodeData, _: string): Promise<any> {
const system_prompt = nodeData.inputs?.system_prompt as string
const topResponses = Number(nodeData.inputs?.topResponses as number)
const cache = nodeData.inputs?.cache as BaseCache

const obj: Partial<BittensorInput> & BaseLLMParams = {
systemPrompt: system_prompt,
topResponses: topResponses
}
if (cache) obj.cache = cache

const model = new NIBittensorLLM(obj)
return model
}
}

module.exports = { nodeClass: Bittensor_LLMs }

After Width: | Height: | Size: 24 KiB

@ -1,6 +1,7 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Cohere, CohereInput } from './core'
import { BaseCache } from 'langchain/schema'

class Cohere_LLMs implements INode {
label: string

@ -17,7 +18,7 @@ class Cohere_LLMs implements INode {
constructor() {
this.label = 'Cohere'
this.name = 'cohere'
this.version = 1.0
this.version = 2.0
this.type = 'Cohere'
this.icon = 'cohere.png'
this.category = 'LLMs'

@ -30,6 +31,12 @@ class Cohere_LLMs implements INode {
credentialNames: ['cohereApi']
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Model Name',
name: 'modelName',

@ -85,7 +92,7 @@ class Cohere_LLMs implements INode {
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const maxTokens = nodeData.inputs?.maxTokens as string

const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const cohereApiKey = getCredentialParam('cohereApiKey', credentialData, nodeData)

@ -96,7 +103,7 @@ class Cohere_LLMs implements INode {
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (modelName) obj.model = modelName
if (temperature) obj.temperature = parseFloat(temperature)

if (cache) obj.cache = cache
const model = new Cohere(obj)
return model
}

@ -1,7 +1,7 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { GooglePaLM, GooglePaLMTextInput } from 'langchain/llms/googlepalm'

import { BaseCache } from 'langchain/schema'
class GooglePaLM_LLMs implements INode {
label: string
name: string

@ -17,7 +17,7 @@ class GooglePaLM_LLMs implements INode {
constructor() {
this.label = 'GooglePaLM'
this.name = 'GooglePaLM'
this.version = 1.0
this.version = 2.0
this.type = 'GooglePaLM'
this.icon = 'Google_PaLM_Logo.svg'
this.category = 'LLMs'

@ -30,6 +30,12 @@ class GooglePaLM_LLMs implements INode {
credentialNames: ['googleMakerSuite']
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Model Name',
name: 'modelName',

@ -126,6 +132,7 @@ class GooglePaLM_LLMs implements INode {
const topP = nodeData.inputs?.topP as string
const topK = nodeData.inputs?.topK as string
const stopSequencesObj = nodeData.inputs?.stopSequencesObj
const cache = nodeData.inputs?.cache as BaseCache

const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const googleMakerSuiteKey = getCredentialParam('googleMakerSuiteKey', credentialData, nodeData)

@ -139,6 +146,7 @@ class GooglePaLM_LLMs implements INode {
if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (topK) obj.topK = parseFloat(topK)
if (cache) obj.cache = cache

let parsedStopSequences: any | undefined = undefined
if (stopSequencesObj) {

@ -2,6 +2,7 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { GoogleVertexAI, GoogleVertexAITextInput } from 'langchain/llms/googlevertexai'
import { GoogleAuthOptions } from 'google-auth-library'
import { BaseCache } from 'langchain/schema'

class GoogleVertexAI_LLMs implements INode {
label: string

@ -18,7 +19,7 @@ class GoogleVertexAI_LLMs implements INode {
constructor() {
this.label = 'GoogleVertexAI'
this.name = 'googlevertexai'
this.version = 1.0
this.version = 2.0
this.type = 'GoogleVertexAI'
this.icon = 'vertexai.svg'
this.category = 'LLMs'

@ -34,6 +35,12 @@ class GoogleVertexAI_LLMs implements INode {
'Google Vertex AI credential. If you are using a GCP service like Cloud Run, or if you have installed default credentials on your local machine, you do not need to set this credential.'
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Model Name',
name: 'modelName',

@ -120,6 +127,7 @@ class GoogleVertexAI_LLMs implements INode {
const modelName = nodeData.inputs?.modelName as string
const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
const topP = nodeData.inputs?.topP as string
const cache = nodeData.inputs?.cache as BaseCache

const obj: Partial<GoogleVertexAITextInput> = {
temperature: parseFloat(temperature),

@ -129,6 +137,7 @@ class GoogleVertexAI_LLMs implements INode {

if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (cache) obj.cache = cache

const model = new GoogleVertexAI(obj)
return model

@ -1,6 +1,7 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { HFInput, HuggingFaceInference } from './core'
import { BaseCache } from 'langchain/schema'

class HuggingFaceInference_LLMs implements INode {
label: string

@ -17,7 +18,7 @@ class HuggingFaceInference_LLMs implements INode {
constructor() {
this.label = 'HuggingFace Inference'
this.name = 'huggingFaceInference_LLMs'
this.version = 1.0
this.version = 2.0
this.type = 'HuggingFaceInference'
this.icon = 'huggingface.png'
this.category = 'LLMs'

@ -30,6 +31,12 @@ class HuggingFaceInference_LLMs implements INode {
credentialNames: ['huggingFaceApi']
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Model',
name: 'model',

@ -106,6 +113,8 @@ class HuggingFaceInference_LLMs implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const huggingFaceApiKey = getCredentialParam('huggingFaceApiKey', credentialData, nodeData)

const cache = nodeData.inputs?.cache as BaseCache

const obj: Partial<HFInput> = {
model,
apiKey: huggingFaceApiKey

@ -119,6 +128,8 @@ class HuggingFaceInference_LLMs implements INode {
if (endpoint) obj.endpoint = endpoint

const huggingFace = new HuggingFaceInference(obj)
if (cache) huggingFace.cache = cache

return huggingFace
}
}

@ -1,6 +1,8 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAI, OpenAIInput } from 'langchain/llms/openai'
import { BaseLLMParams } from 'langchain/llms/base'
import { BaseCache } from 'langchain/schema'

class OpenAI_LLMs implements INode {
label: string

@ -17,7 +19,7 @@ class OpenAI_LLMs implements INode {
constructor() {
this.label = 'OpenAI'
this.name = 'openAI'
this.version = 2.0
this.version = 3.0
this.type = 'OpenAI'
this.icon = 'openai.png'
this.category = 'LLMs'

@ -30,6 +32,12 @@ class OpenAI_LLMs implements INode {
credentialNames: ['openAIApi']
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Model Name',
name: 'modelName',

@ -149,7 +157,9 @@ class OpenAI_LLMs implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

const obj: Partial<OpenAIInput> & { openAIApiKey?: string } = {
const cache = nodeData.inputs?.cache as BaseCache

const obj: Partial<OpenAIInput> & BaseLLMParams & { openAIApiKey?: string } = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,

@ -164,8 +174,9 @@ class OpenAI_LLMs implements INode {
if (batchSize) obj.batchSize = parseInt(batchSize, 10)
if (bestOf) obj.bestOf = parseInt(bestOf, 10)

let parsedBaseOptions: any | undefined = undefined
if (cache) obj.cache = cache

let parsedBaseOptions: any | undefined = undefined
if (baseOptions) {
try {
parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)

@ -1,6 +1,8 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Replicate, ReplicateInput } from 'langchain/llms/replicate'
import { BaseCache } from 'langchain/schema'
import { BaseLLMParams } from 'langchain/llms/base'

class Replicate_LLMs implements INode {
label: string

@ -17,7 +19,7 @@ class Replicate_LLMs implements INode {
constructor() {
this.label = 'Replicate'
this.name = 'replicate'
this.version = 1.0
this.version = 2.0
this.type = 'Replicate'
this.icon = 'replicate.svg'
this.category = 'LLMs'

@ -30,6 +32,12 @@ class Replicate_LLMs implements INode {
credentialNames: ['replicateApi']
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Model',
name: 'model',

@ -103,7 +111,9 @@ class Replicate_LLMs implements INode {
const name = modelName.split(':')[0].split('/').pop()
const org = modelName.split(':')[0].split('/')[0]

const obj: ReplicateInput = {
const cache = nodeData.inputs?.cache as BaseCache

const obj: ReplicateInput & BaseLLMParams = {
model: `${org}/${name}:${version}`,
apiKey
}

@ -120,6 +130,8 @@ class Replicate_LLMs implements INode {
}
if (Object.keys(inputs).length) obj.input = inputs

if (cache) obj.cache = cache

const model = new Replicate(obj)
return model
}

@ -0,0 +1,118 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject } from '../../../src'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { UpstashRedisChatMessageHistory } from 'langchain/stores/message/upstash_redis'

class UpstashRedisBackedChatMemory_Memory implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
credential: INodeParams
inputs: INodeParams[]

constructor() {
this.label = 'Upstash Redis-Backed Chat Memory'
this.name = 'upstashRedisBackedChatMemory'
this.version = 1.0
this.type = 'UpstashRedisBackedChatMemory'
this.icon = 'upstash.svg'
this.category = 'Memory'
this.description = 'Stores the conversation memory in an Upstash Redis server'
this.baseClasses = [this.type, ...getBaseClasses(BufferMemory)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
description: 'Configure password authentication on your Upstash Redis instance',
credentialNames: ['upstashRedisMemoryApi']
}
this.inputs = [
{
label: 'Upstash Redis REST URL',
name: 'baseURL',
type: 'string',
placeholder: 'https://<your-url>.upstash.io'
},
{
label: 'Session Id',
name: 'sessionId',
type: 'string',
description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
default: '',
additionalParams: true,
optional: true
},
{
label: 'Session Timeout',
name: 'sessionTTL',
type: 'number',
description: 'Omit this parameter to make sessions never expire',
additionalParams: true,
optional: true
}
]
}

async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return initializeUpstashRedis(nodeData, options)
}

async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
const redis = await initializeUpstashRedis(nodeData, options)
const sessionId = nodeData.inputs?.sessionId as string
const chatId = options?.chatId as string
options.logger.info(`Clearing Upstash Redis memory session ${sessionId ? sessionId : chatId}`)
await redis.clear()
options.logger.info(`Successfully cleared Upstash Redis memory session ${sessionId ? sessionId : chatId}`)
}
}

const initializeUpstashRedis = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const baseURL = nodeData.inputs?.baseURL as string
const sessionId = nodeData.inputs?.sessionId as string
const sessionTTL = nodeData.inputs?.sessionTTL as string
const chatId = options?.chatId as string

let isSessionIdUsingChatMessageId = false
if (!sessionId && chatId) isSessionIdUsingChatMessageId = true

const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const upstashRestToken = getCredentialParam('upstashRestToken', credentialData, nodeData)

const redisChatMessageHistory = new UpstashRedisChatMessageHistory({
sessionId: sessionId ? sessionId : chatId,
sessionTTL: sessionTTL ? parseInt(sessionTTL, 10) : undefined,
config: {
url: baseURL,
token: upstashRestToken
}
})

const memory = new BufferMemoryExtended({
chatHistory: redisChatMessageHistory,
isSessionIdUsingChatMessageId
})

return memory
}

interface BufferMemoryExtendedInput {
isSessionIdUsingChatMessageId: boolean
}

class BufferMemoryExtended extends BufferMemory {
isSessionIdUsingChatMessageId? = false

constructor(fields: BufferMemoryInput & Partial<BufferMemoryExtendedInput>) {
super(fields)
this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
}
}

module.exports = { nodeClass: UpstashRedisBackedChatMemory_Memory }
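
A quick sketch of the history wiring above, with placeholder URL and token (both assumptions for illustration): when the Session Id input is empty, the current chat id becomes the Redis key, and sessionTTL is only applied when provided.

const history = new UpstashRedisChatMessageHistory({
    sessionId: 'chat-123', // the node falls back to the chat id when Session Id is empty
    sessionTTL: 3600, // seconds; omit so the session never expires
    config: {
        url: 'https://<your-url>.upstash.io',
        token: '<UPSTASH_REST_TOKEN>'
    }
})
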
@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="256px" height="341px" viewBox="0 0 256 341" version="1.1" xmlns="http://www.w3.org/2000/svg" preserveAspectRatio="xMidYMid">
<title>upstash</title>
<g>
<path d="M0,298.416784 C56.5542815,354.970323 148.246768,354.970323 204.801032,298.416784 C261.354571,241.86252 261.354571,150.170106 204.801032,93.6158424 L179.200462,119.215688 C221.61634,161.631567 221.61634,230.401059 179.200462,272.816213 C136.785307,315.232092 68.0157428,315.232092 25.5998642,272.816213 L0,298.416784 Z" fill="#00C98D"></path>
<path d="M51.200362,247.216367 C79.4772765,275.493137 125.323122,275.493137 153.600615,247.216367 C181.877385,218.939598 181.877385,173.093028 153.600615,144.816259 L128.000769,170.416105 C142.139154,184.55449 142.139154,207.477412 128.000769,221.616521 C113.86166,235.754906 90.9387378,235.754906 76.800353,221.616521 L51.200362,247.216367 Z" fill="#00C98D"></path>
<path d="M256,42.415426 C199.445737,-14.1384753 107.753322,-14.1384753 51.1994207,42.415426 C-5.35485714,98.9696894 -5.35485714,190.662104 51.1994207,247.216367 L76.7989048,221.616521 C34.3841124,179.200643 34.3841124,110.431151 76.7989048,68.0159962 C119.214783,25.6001177 187.984275,25.6001177 230.39943,68.0159962 L256,42.415426 Z" fill="#00C98D"></path>
<path d="M204.800308,93.6158424 C176.523538,65.3390727 130.676245,65.3390727 102.399475,93.6158424 C74.1219813,121.893336 74.1219813,167.739181 102.399475,196.015951 L127.999321,170.416105 C113.860936,156.27772 113.860936,133.354797 127.999321,119.215688 C142.137706,105.077304 165.060629,105.077304 179.199738,119.215688 L204.800308,93.6158424 Z" fill="#00C98D"></path>
<path d="M256,42.415426 C199.445737,-14.1384753 107.753322,-14.1384753 51.1994207,42.415426 C-5.35485714,98.9696894 -5.35485714,190.662104 51.1994207,247.216367 L76.7989048,221.616521 C34.3841124,179.200643 34.3841124,110.431151 76.7989048,68.0159962 C119.214783,25.6001177 187.984275,25.6001177 230.39943,68.0159962 L256,42.415426 Z" fill-opacity="0.4" fill="#FFFFFF"></path>
<path d="M204.800308,93.6158424 C176.523538,65.3390727 130.676245,65.3390727 102.399475,93.6158424 C74.1219813,121.893336 74.1219813,167.739181 102.399475,196.015951 L127.999321,170.416105 C113.860936,156.27772 113.860936,133.354797 127.999321,119.215688 C142.137706,105.077304 165.060629,105.077304 179.199738,119.215688 L204.800308,93.6158424 Z" fill-opacity="0.4" fill="#FFFFFF"></path>
</g>
</svg>

After Width: | Height: | Size: 2.4 KiB

@ -0,0 +1,107 @@
import { VectorStore } from 'langchain/vectorstores/base'
import { INode, INodeData, INodeParams, INodeOutputsValue } from '../../../src/Interface'
import { handleEscapeCharacters } from '../../../src'
import { ScoreThresholdRetriever } from 'langchain/retrievers/score_threshold'

class SimilarityThresholdRetriever_Retrievers implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]

constructor() {
this.label = 'Similarity Score Threshold Retriever'
this.name = 'similarityThresholdRetriever'
this.version = 1.0
this.type = 'SimilarityThresholdRetriever'
this.icon = 'similaritythreshold.svg'
this.category = 'Retrievers'
this.description = 'Returns results based on the minimum similarity percentage'
this.baseClasses = [this.type, 'BaseRetriever']
this.inputs = [
{
label: 'Vector Store',
name: 'vectorStore',
type: 'VectorStore'
},
{
label: 'Minimum Similarity Score (%)',
name: 'minSimilarityScore',
description: 'Finds results with at least this similarity score',
type: 'number',
default: 80,
step: 1
},
{
label: 'Max K',
name: 'maxK',
description: `The maximum number of results to fetch`,
type: 'number',
default: 20,
step: 1
},
{
label: 'K Increment',
name: 'kIncrement',
description: `How much to increase K by each time. It'll fetch N results, then N + kIncrement, then N + kIncrement * 2, etc.`,
type: 'number',
default: 2,
step: 1
}
]
this.outputs = [
{
label: 'Similarity Threshold Retriever',
name: 'retriever',
baseClasses: this.baseClasses
},
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
},
{
label: 'Text',
name: 'text',
baseClasses: ['string', 'json']
}
]
}

async init(nodeData: INodeData, input: string): Promise<any> {
const vectorStore = nodeData.inputs?.vectorStore as VectorStore
const minSimilarityScore = nodeData.inputs?.minSimilarityScore as number
const maxK = nodeData.inputs?.maxK as string
const kIncrement = nodeData.inputs?.kIncrement as string

const output = nodeData.outputs?.output as string

const retriever = ScoreThresholdRetriever.fromVectorStore(vectorStore, {
minSimilarityScore: minSimilarityScore ? minSimilarityScore / 100 : 0.9,
maxK: maxK ? parseInt(maxK, 10) : 100,
kIncrement: kIncrement ? parseInt(kIncrement, 10) : 2
})

if (output === 'retriever') return retriever
else if (output === 'document') return await retriever.getRelevantDocuments(input)
else if (output === 'text') {
let finaltext = ''

const docs = await retriever.getRelevantDocuments(input)

for (const doc of docs) finaltext += `${doc.pageContent}\n`

return handleEscapeCharacters(finaltext, false)
}

return retriever
}
}

module.exports = { nodeClass: SimilarityThresholdRetriever_Retrievers }
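
The K Increment description maps directly onto how init() builds the retriever. A standalone sketch with the node's defaults, assuming an existing `vectorStore` instance (hypothetical here):

// Fetches documents in batches that grow by kIncrement (up to maxK) until
// enough of them clear the similarity threshold; the node divides its
// percentage input by 100, so 80% becomes 0.8 here.
const retriever = ScoreThresholdRetriever.fromVectorStore(vectorStore, {
    minSimilarityScore: 0.8,
    maxK: 20,
    kIncrement: 2
})
const docs = await retriever.getRelevantDocuments('example query')
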
@ -0,0 +1,5 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-chart-line" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
<path stroke="none" d="M0 0h24v24H0z" fill="none"></path>
<path d="M4 19l16 0"></path>
<path d="M4 15l4 -6l4 2l4 -5l4 4"></path>
</svg>

After Width: | Height: | Size: 374 B

@ -30,7 +30,7 @@ export class ChromaExtended extends Chroma {
if (this.chromaApiKey) {
obj.fetchOptions = {
headers: {
'X-Api-Key': this.chromaApiKey
Authorization: `Bearer ${this.chromaApiKey}`
}
}
}

@ -1,6 +1,6 @@
{
"name": "flowise-components",
"version": "1.3.7",
"version": "1.3.8",
"description": "Flowiseai Components",
"main": "dist/src/index",
"types": "dist/src/index.d.ts",

@ -18,8 +18,9 @@
"dependencies": {
"@aws-sdk/client-dynamodb": "^3.360.0",
"@dqbd/tiktoken": "^1.0.7",
"@getzep/zep-js": "^0.6.3",
"@elastic/elasticsearch": "^8.9.0",
"@getzep/zep-js": "^0.6.3",
"@gomomento/sdk": "^1.40.2",
"@google-ai/generativelanguage": "^0.2.1",
"@huggingface/inference": "^2.6.1",
"@notionhq/client": "^2.2.8",

@ -29,6 +30,7 @@
"@supabase/supabase-js": "^2.29.0",
"@types/js-yaml": "^4.0.5",
"@types/jsdom": "^21.1.1",
"@upstash/redis": "^1.22.1",
"@zilliz/milvus2-sdk-node": "^2.2.24",
"apify-client": "^2.7.1",
"axios": "^0.27.2",

@ -43,7 +45,8 @@
"google-auth-library": "^9.0.0",
"graphql": "^16.6.0",
"html-to-text": "^9.0.5",
"langchain": "^0.0.152",
"ioredis": "^5.3.2",
"langchain": "^0.0.157",
"langfuse-langchain": "^1.0.14-alpha.0",
"langsmith": "^0.0.32",
"linkifyjs": "^4.1.1",

@ -151,6 +151,7 @@ export class CustomChainHandler extends BaseCallbackHandler {
socketIOClientId = ''
skipK = 0 // Skip streaming for the first K calls of handleLLMStart
returnSourceDocuments = false
cachedResponse = true

constructor(socketIO: Server, socketIOClientId: string, skipK?: number, returnSourceDocuments?: boolean) {
super()

@ -161,6 +162,7 @@ export class CustomChainHandler extends BaseCallbackHandler {
}

handleLLMStart() {
this.cachedResponse = false
if (this.skipK > 0) this.skipK -= 1
}

@ -178,9 +180,30 @@ export class CustomChainHandler extends BaseCallbackHandler {
this.socketIO.to(this.socketIOClientId).emit('end')
}

handleChainEnd(outputs: ChainValues): void | Promise<void> {
if (this.returnSourceDocuments) {
this.socketIO.to(this.socketIOClientId).emit('sourceDocuments', outputs?.sourceDocuments)
handleChainEnd(outputs: ChainValues, _: string, parentRunId?: string): void | Promise<void> {
/*
Langchain does not call handleLLMStart, handleLLMEnd, or handleLLMNewToken when the chain response is cached.
The callback order is "Chain Start -> LLM Start -> LLM Token -> LLM End -> Chain End" for normal responses.
The callback order is "Chain Start -> Chain End" for cached responses.
*/
if (this.cachedResponse && parentRunId === undefined) {
const cachedValue = outputs.text ?? outputs.response ?? outputs.output ?? outputs.output_text
// Split at whitespace and keep the whitespace to preserve the original formatting.
const result = cachedValue.split(/(\s+)/)
result.forEach((token: string, index: number) => {
if (index === 0) {
this.socketIO.to(this.socketIOClientId).emit('start', token)
}
this.socketIO.to(this.socketIOClientId).emit('token', token)
})
if (this.returnSourceDocuments) {
this.socketIO.to(this.socketIOClientId).emit('sourceDocuments', outputs?.sourceDocuments)
}
this.socketIO.to(this.socketIOClientId).emit('end')
} else {
if (this.returnSourceDocuments) {
this.socketIO.to(this.socketIOClientId).emit('sourceDocuments', outputs?.sourceDocuments)
}
}
}
}
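
The whitespace-preserving split above is what lets a cached response stream back with its original formatting intact. A small illustration of the regex's behavior (the example string is hypothetical):

// A capturing group in split() keeps the separators in the result array:
const tokens = 'Hello  world\nbye'.split(/(\s+)/)
// tokens is ['Hello', '  ', 'world', '\n', 'bye'], so joining the emitted
// tokens reproduces the cached text exactly:
console.log(tokens.join('') === 'Hello  world\nbye') // true
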
@ -89,7 +89,7 @@
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -201,7 +201,15 @@
"id": "chatOpenAI_1-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_1-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -392,7 +400,7 @@
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -504,7 +512,15 @@
"id": "chatOpenAI_2-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_2-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -397,7 +397,7 @@
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -509,7 +509,15 @@
"id": "chatOpenAI_2-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_2-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -551,7 +559,7 @@
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -663,7 +671,15 @@
"id": "chatOpenAI_1-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_1-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -705,7 +721,7 @@
"id": "chatOpenAI_3",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -817,7 +833,15 @@
"id": "chatOpenAI_3-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_3-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -169,14 +169,14 @@
"id": "chatOpenAI_0",
"position": {
"x": 1226.7977900193628,
"y": 48.01100655894436
"y": -22.01100655894436
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -288,7 +288,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -313,7 +321,7 @@
"selected": false,
"positionAbsolute": {
"x": 1226.7977900193628,
"y": 48.01100655894436
"y": -22.01100655894436
},
"dragging": false
},

@ -252,7 +252,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -364,7 +364,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -78,7 +78,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -190,7 +190,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -70,7 +70,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -182,7 +182,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -215,7 +215,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -327,7 +327,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -141,14 +141,14 @@
"id": "chatAnthropic_0",
"position": {
"x": 800.5525382783799,
"y": -76.7988221837009
"y": -130.7988221837009
},
"type": "customNode",
"data": {
"id": "chatAnthropic_0",
"label": "ChatAnthropic",
"name": "chatAnthropic",
"version": 1,
"version": 2,
"type": "ChatAnthropic",
"baseClasses": ["ChatAnthropic", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -258,7 +258,15 @@
"id": "chatAnthropic_0-input-topK-number"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatAnthropic_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "claude-2",
"temperature": 0.9,

@ -280,7 +288,7 @@
"selected": false,
"positionAbsolute": {
"x": 800.5525382783799,
"y": -76.7988221837009
"y": -130.7988221837009
},
"dragging": false
},

@ -157,7 +157,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -269,7 +269,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -13,7 +13,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 1,
"version": 2,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],

@ -126,7 +126,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo-16k",
"temperature": "0",

@ -7,14 +7,14 @@
"id": "chatOpenAI_0",
"position": {
"x": 1184.1176114500388,
"y": -44.15535835370571
"y": -74.15535835370571
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -126,7 +126,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": "0",

@ -150,7 +158,7 @@
},
"positionAbsolute": {
"x": 1184.1176114500388,
"y": -44.15535835370571
"y": -74.15535835370571
},
"selected": false,
"dragging": false

@ -386,7 +386,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -498,7 +498,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -148,14 +148,14 @@
"id": "huggingFaceInference_LLMs_0",
"position": {
"x": 498.8594464193537,
"y": -44.91050256311678
"y": -94.91050256311678
},
"type": "customNode",
"data": {
"id": "huggingFaceInference_LLMs_0",
"label": "HuggingFace Inference",
"name": "huggingFaceInference_LLMs",
"version": 1,
"version": 2,
"type": "HuggingFaceInference",
"baseClasses": ["HuggingFaceInference", "LLM", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",

@ -232,7 +232,15 @@
"id": "huggingFaceInference_LLMs_0-input-frequencyPenalty-number"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "huggingFaceInference_LLMs_0-input-cache-BaseCache"
}
],
"inputs": {
"model": "tiiuae/falcon-7b-instruct",
"endpoint": "",

@ -256,7 +264,7 @@
"selected": false,
"positionAbsolute": {
"x": 498.8594464193537,
"y": -44.91050256311678
"y": -94.91050256311678
},
"dragging": false
}

@ -265,14 +265,14 @@
"id": "chatLocalAI_0",
"position": {
"x": 1191.9512064167336,
"y": -44.05401001663306
"y": -94.05401001663306
},
"type": "customNode",
"data": {
"id": "chatLocalAI_0",
"label": "ChatLocalAI",
"name": "chatLocalAI",
"version": 1,
"version": 2,
"type": "ChatLocalAI",
"baseClasses": ["ChatLocalAI", "BaseChatModel", "LLM", "BaseLLM", "BaseLanguageModel", "BaseLangChain"],
"category": "Chat Models",

@ -325,7 +325,15 @@
"id": "chatLocalAI_0-input-timeout-number"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatLocalAI_0-input-cache-BaseCache"
}
],
"inputs": {
"basePath": "http://localhost:8080/v1",
"modelName": "ggml-gpt4all-j.bin",

@ -348,7 +356,7 @@
"selected": false,
"positionAbsolute": {
"x": 1191.9512064167336,
"y": -44.05401001663306
"y": -94.05401001663306
},
"dragging": false
},

@ -115,14 +115,14 @@
"id": "chatOpenAI_0",
"position": {
"x": 1554.3875781165111,
"y": -14.792508259787212
"y": -74.792508259787212
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -234,7 +234,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": "0",

@ -259,7 +267,7 @@
"selected": false,
"positionAbsolute": {
"x": 1554.3875781165111,
"y": -14.792508259787212
"y": -74.792508259787212
},
"dragging": false
},

@ -156,7 +156,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -268,7 +268,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -113,14 +113,14 @@
"id": "chatOpenAI_0",
"position": {
"x": 1197.7264239788542,
"y": -16.177600120515933
"y": -76.177600120515933
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -232,7 +232,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": "0",

@ -257,7 +265,7 @@
"selected": false,
"positionAbsolute": {
"x": 1197.7264239788542,
"y": -16.177600120515933
"y": -76.177600120515933
},
"dragging": false
},

@ -436,7 +436,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -548,7 +548,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -278,7 +278,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -390,7 +390,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -679,7 +679,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 1,
"version": 2,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",

@ -791,7 +791,15 @@
"id": "chatOpenAI_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,

@ -321,14 +321,14 @@
"id": "openAI_2",
"position": {
"x": 520.8471510168988,
"y": -1282.1183473852964
"y": -1362.1183473852964
},
"type": "customNode",
"data": {
"id": "openAI_2",
"label": "OpenAI",
"name": "openAI",
"version": 2,
"version": 3,
"type": "OpenAI",
"baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",

@ -436,7 +436,15 @@
"id": "openAI_2-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "openAI_2-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo-instruct",
"temperature": 0.7,

@ -463,7 +471,7 @@
"selected": false,
"positionAbsolute": {
"x": 520.8471510168988,
"y": -1282.1183473852964
"y": -1362.1183473852964
},
"dragging": false
},

@ -557,7 +565,7 @@
"id": "chromaExistingIndex_0",
"position": {
"x": 509.55198017578016,
"y": -732.42003311752
"y": -782.42003311752
},
"type": "customNode",
"data": {

@ -638,7 +646,7 @@
"selected": false,
"positionAbsolute": {
"x": 509.55198017578016,
"y": -732.42003311752
"y": -782.42003311752
},
"dragging": false
},

@ -732,14 +740,14 @@
"id": "openAI_3",
"position": {
"x": 504.808358369027,
"y": -197.78194663790197
"y": -257.78194663790197
},
"type": "customNode",
"data": {
"id": "openAI_3",
"label": "OpenAI",
"name": "openAI",
"version": 2,
"version": 3,
"type": "OpenAI",
"baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",

@ -847,7 +855,15 @@
"id": "openAI_3-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "openAI_3-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo-instruct",
"temperature": 0.7,

@ -874,7 +890,7 @@
"selected": false,
"positionAbsolute": {
"x": 504.808358369027,
"y": -197.78194663790197
"y": -257.78194663790197
},
"dragging": false
},

@ -993,14 +1009,14 @@
"id": "openAI_4",
"position": {
"x": 1619.5346765785587,
"y": 292.29615581180684
"y": 352.29615581180684
},
"type": "customNode",
"data": {
"id": "openAI_4",
"label": "OpenAI",
"name": "openAI",
"version": 2,
"version": 3,
"type": "OpenAI",
"baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
"category": "LLMs",

@ -1108,7 +1124,15 @@
"id": "openAI_4-input-basepath-string"
}
],
"inputAnchors": [],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "openAI_4-input-cache-BaseCache"
}
],
"inputs": {
"modelName": "gpt-3.5-turbo-instruct",
"temperature": 0.7,

@ -1135,7 +1159,7 @@
"selected": false,
"positionAbsolute": {
"x": 1619.5346765785587,
"y": 292.29615581180684
"y": 352.29615581180684
},
"dragging": false
}

@@ -281,7 +281,7 @@
 "id": "chatOpenAI_0",
 "label": "ChatOpenAI",
 "name": "chatOpenAI",
-"version": 1,
+"version": 2,
 "type": "ChatOpenAI",
 "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
 "category": "Chat Models",
@@ -393,7 +393,15 @@
 "id": "chatOpenAI_0-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "chatOpenAI_0-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo",
 "temperature": 0.9,

@@ -260,13 +260,13 @@
 "id": "chatOpenAI_0",
 "position": {
 "x": 335.7621848973805,
-"y": -651.7411273245009
+"y": -721.7411273245009
 },
 "type": "customNode",
 "data": {
 "id": "chatOpenAI_0",
 "label": "ChatOpenAI",
-"version": 1,
+"version": 2,
 "name": "chatOpenAI",
 "type": "ChatOpenAI",
 "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@@ -385,7 +385,15 @@
 "id": "chatOpenAI_0-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "chatOpenAI_0-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo-16k",
 "temperature": 0.9,
@@ -411,7 +419,7 @@
 "dragging": false,
 "positionAbsolute": {
 "x": 335.7621848973805,
-"y": -651.7411273245009
+"y": -721.7411273245009
 }
 },
 {
@@ -420,13 +428,13 @@
 "id": "chatOpenAI_1",
 "position": {
 "x": 1765.2801848172305,
-"y": -667.9261054149061
+"y": -737.9261054149061
 },
 "type": "customNode",
 "data": {
 "id": "chatOpenAI_1",
 "label": "ChatOpenAI",
-"version": 1,
+"version": 2,
 "name": "chatOpenAI",
 "type": "ChatOpenAI",
 "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@@ -545,7 +553,15 @@
 "id": "chatOpenAI_1-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "chatOpenAI_1-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo-16k",
 "temperature": 0.9,
@@ -571,7 +587,7 @@
 "dragging": false,
 "positionAbsolute": {
 "x": 1765.2801848172305,
-"y": -667.9261054149061
+"y": -737.9261054149061
 }
 },
 {

@@ -289,14 +289,14 @@
 "id": "openAI_1",
 "position": {
 "x": 791.6102007244282,
-"y": -13.71386876566092
+"y": -83.71386876566092
 },
 "type": "customNode",
 "data": {
 "id": "openAI_1",
 "label": "OpenAI",
 "name": "openAI",
-"version": 2,
+"version": 3,
 "type": "OpenAI",
 "baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
 "category": "LLMs",
@@ -404,7 +404,15 @@
 "id": "openAI_1-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "openAI_1-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo-instruct",
 "temperature": 0.7,
@@ -431,7 +439,7 @@
 "selected": false,
 "positionAbsolute": {
 "x": 791.6102007244282,
-"y": -13.71386876566092
+"y": -83.71386876566092
 },
 "dragging": false
 },
@@ -441,14 +449,14 @@
 "id": "openAI_2",
 "position": {
 "x": 1571.148617508543,
-"y": -20.372437481171687
+"y": -90.372437481171687
 },
 "type": "customNode",
 "data": {
 "id": "openAI_2",
 "label": "OpenAI",
 "name": "openAI",
-"version": 2,
+"version": 3,
 "type": "OpenAI",
 "baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
 "category": "LLMs",
@@ -556,7 +564,15 @@
 "id": "openAI_2-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "openAI_2-input-cache-BaseCache"
+}
+],
 "default": "gpt-3.5-turbo-instruct",
 "inputs": {
 "modelName": "gpt-3.5-turbo-instruct",
@@ -584,7 +600,7 @@
 "selected": false,
 "positionAbsolute": {
 "x": 1571.148617508543,
-"y": -20.372437481171687
+"y": -90.372437481171687
 },
 "dragging": false
 }

@@ -148,13 +148,13 @@
 "id": "replicate_0",
 "position": {
 "x": 623.313978186024,
-"y": -72.92788335022428
+"y": -142.92788335022428
 },
 "type": "customNode",
 "data": {
 "id": "replicate_0",
 "label": "Replicate",
-"version": 1,
+"version": 2,
 "name": "replicate",
 "type": "Replicate",
 "baseClasses": ["Replicate", "BaseChatModel", "LLM", "BaseLLM", "BaseLanguageModel", "Runnable"],
@@ -226,7 +226,15 @@
 "id": "replicate_0-input-additionalInputs-json"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "replicate_0-input-cache-BaseCache"
+}
+],
 "inputs": {
 "model": "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
 "temperature": 0.7,
@@ -249,7 +257,7 @@
 "selected": false,
 "positionAbsolute": {
 "x": 623.313978186024,
-"y": -72.92788335022428
+"y": -142.92788335022428
 },
 "dragging": false
 }

@@ -13,7 +13,7 @@
 "data": {
 "id": "chatOpenAI_0",
 "label": "ChatOpenAI",
-"version": 1,
+"version": 2,
 "name": "chatOpenAI",
 "type": "ChatOpenAI",
 "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@@ -126,7 +126,15 @@
 "id": "chatOpenAI_0-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "chatOpenAI_0-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo",
 "temperature": "0",
@@ -167,7 +175,7 @@
 "data": {
 "id": "sqlDatabaseChain_0",
 "label": "Sql Database Chain",
-"version": 2,
+"version": 4,
 "name": "sqlDatabaseChain",
 "type": "SqlDatabaseChain",
 "baseClasses": ["SqlDatabaseChain", "BaseChain", "Runnable"],
@@ -206,6 +214,46 @@
 "placeholder": "1270.0.0.1:5432/chinook",
 "id": "sqlDatabaseChain_0-input-url-string"
 },
+{
+"label": "Include Tables",
+"name": "includesTables",
+"type": "string",
+"description": "Tables to include for queries, separated by comma. Can only use Include Tables or Ignore Tables",
+"placeholder": "table1, table2",
+"additionalParams": true,
+"optional": true,
+"id": "sqlDatabaseChain_0-input-includesTables-string"
+},
+{
+"label": "Ignore Tables",
+"name": "ignoreTables",
+"type": "string",
+"description": "Tables to ignore for queries, separated by comma. Can only use Ignore Tables or Include Tables",
+"placeholder": "table1, table2",
+"additionalParams": true,
+"optional": true,
+"id": "sqlDatabaseChain_0-input-ignoreTables-string"
+},
+{
+"label": "Sample table's rows info",
+"name": "sampleRowsInTableInfo",
+"type": "number",
+"description": "Number of sample rows for tables to load for info.",
+"placeholder": "3",
+"additionalParams": true,
+"optional": true,
+"id": "sqlDatabaseChain_0-input-sampleRowsInTableInfo-number"
+},
+{
+"label": "Top Keys",
+"name": "topK",
+"type": "number",
+"description": "If you are querying for several rows of a table you can select the maximum number of results you want to get by using the top_k parameter (default is 10). This is useful for avoiding query results that exceed the prompt max length or consume tokens unnecessarily.",
+"placeholder": "10",
+"additionalParams": true,
+"optional": true,
+"id": "sqlDatabaseChain_0-input-topK-number"
+},
 {
 "label": "Custom Prompt",
 "name": "customPrompt",

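The four new SqlDatabaseChain inputs correspond naturally to LangChain JS's SQL database options. A sketch of the presumed wiring; `SqlDatabase.fromDataSourceParams` does accept these option names, but whether the node maps its inputs exactly this way is an assumption:

    import { DataSource } from 'typeorm'
    import { SqlDatabase } from 'langchain/sql_db'

    const buildDb = async () => {
        const appDataSource = new DataSource({ type: 'sqlite', database: 'chinook.db' })
        return SqlDatabase.fromDataSourceParams({
            appDataSource,
            includesTables: ['table1', 'table2'], // "Include Tables"; mutually exclusive with ignoreTables
            // ignoreTables: ['table3'],          // "Ignore Tables"
            sampleRowsInTableInfo: 3              // rows appended to each table's schema info
        })
    }

`topK` is a chain-level cap on how many rows a generated query returns, keeping results inside the prompt budget as the description above explains.
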
@@ -64,14 +64,14 @@
 "id": "chatOpenAI_0",
 "position": {
 "x": 754.8942497823595,
-"y": -70.76607584232393
+"y": -140
 },
 "type": "customNode",
 "data": {
 "id": "chatOpenAI_0",
 "label": "ChatOpenAI",
 "name": "chatOpenAI",
-"version": 1,
+"version": 2,
 "type": "ChatOpenAI",
 "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
 "category": "Chat Models",
@@ -183,7 +183,15 @@
 "id": "chatOpenAI_0-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "chatOpenAI_0-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo",
 "temperature": 0.9,
@@ -208,7 +216,7 @@
 "selected": false,
 "positionAbsolute": {
 "x": 754.8942497823595,
-"y": -70.76607584232393
+"y": -140
 },
 "dragging": false
 },

@@ -148,14 +148,14 @@
 "id": "openAI_0",
 "position": {
 "x": 513.3297923232442,
-"y": -42.67554802812833
+"y": -112.67554802812833
 },
 "type": "customNode",
 "data": {
 "id": "openAI_0",
 "label": "OpenAI",
 "name": "openAI",
-"version": 2,
+"version": 3,
 "type": "OpenAI",
 "baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
 "category": "LLMs",
@@ -263,7 +263,15 @@
 "id": "openAI_0-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "openAI_0-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo-instruct",
 "temperature": 0.7,
@@ -290,7 +298,7 @@
 "selected": false,
 "positionAbsolute": {
 "x": 513.3297923232442,
-"y": -42.67554802812833
+"y": -112.67554802812833
 },
 "dragging": false
 }

@@ -157,14 +157,14 @@
 "id": "chatOpenAI_0",
 "position": {
 "x": 436.97058562345904,
-"y": 99.96180150605153
+"y": 29.96180150605153
 },
 "type": "customNode",
 "data": {
 "id": "chatOpenAI_0",
 "label": "ChatOpenAI",
 "name": "chatOpenAI",
-"version": 1,
+"version": 2,
 "type": "ChatOpenAI",
 "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
 "category": "Chat Models",
@@ -276,7 +276,15 @@
 "id": "chatOpenAI_0-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "chatOpenAI_0-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo",
 "temperature": "0",
@@ -301,7 +309,7 @@
 "selected": false,
 "positionAbsolute": {
 "x": 436.97058562345904,
-"y": 99.96180150605153
+"y": 29.96180150605153
 },
 "dragging": false
 }

@@ -124,7 +124,7 @@
 "data": {
 "id": "chatOpenAI_0",
 "label": "ChatOpenAI",
-"version": 1,
+"version": 2,
 "name": "chatOpenAI",
 "type": "ChatOpenAI",
 "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@@ -230,7 +230,15 @@
 "id": "chatOpenAI_0-input-baseOptions-json"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "chatOpenAI_0-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo",
 "temperature": "0.5",

@@ -194,14 +194,14 @@
 "id": "chatOpenAI_0",
 "position": {
 "x": 734.7477982032904,
-"y": -400.9979556765114
+"y": -470.9979556765114
 },
 "type": "customNode",
 "data": {
 "id": "chatOpenAI_0",
 "label": "ChatOpenAI",
 "name": "chatOpenAI",
-"version": 1,
+"version": 2,
 "type": "ChatOpenAI",
 "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
 "category": "Chat Models",
@@ -313,7 +313,15 @@
 "id": "chatOpenAI_0-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "chatOpenAI_0-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo",
 "temperature": 0.9,
@@ -338,7 +346,7 @@
 "selected": false,
 "positionAbsolute": {
 "x": 734.7477982032904,
-"y": -400.9979556765114
+"y": -470.9979556765114
 },
 "dragging": false
 },
@@ -432,14 +440,14 @@
 "id": "chatOpenAI_1",
 "position": {
 "x": 68.312124033115,
-"y": -169.65476709991256
+"y": -239.65476709991256
 },
 "type": "customNode",
 "data": {
 "id": "chatOpenAI_1",
 "label": "ChatOpenAI",
 "name": "chatOpenAI",
-"version": 1,
+"version": 2,
 "type": "ChatOpenAI",
 "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
 "category": "Chat Models",
@@ -551,7 +559,15 @@
 "id": "chatOpenAI_1-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "chatOpenAI_1-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo",
 "temperature": 0.9,
@@ -576,7 +592,7 @@
 "selected": false,
 "positionAbsolute": {
 "x": 68.312124033115,
-"y": -169.65476709991256
+"y": -239.65476709991256
 },
 "dragging": false
 }

@@ -13,7 +13,7 @@
 "data": {
 "id": "chatOpenAI_0",
 "label": "ChatOpenAI",
-"version": 1,
+"version": 2,
 "name": "chatOpenAI",
 "type": "ChatOpenAI",
 "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@@ -126,7 +126,15 @@
 "id": "chatOpenAI_0-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "chatOpenAI_0-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo-16k",
 "temperature": "0.9",

@@ -115,7 +115,7 @@
 "id": "openAI_0",
 "label": "OpenAI",
 "name": "openAI",
-"version": 2,
+"version": 3,
 "type": "OpenAI",
 "baseClasses": ["OpenAI", "BaseLLM", "BaseLanguageModel"],
 "category": "LLMs",
@@ -223,7 +223,15 @@
 "id": "openAI_0-input-basepath-string"
 }
 ],
-"inputAnchors": [],
+"inputAnchors": [
+{
+"label": "Cache",
+"name": "cache",
+"type": "BaseCache",
+"optional": true,
+"id": "openAI_0-input-cache-BaseCache"
+}
+],
 "inputs": {
 "modelName": "gpt-3.5-turbo-instruct",
 "temperature": 0.7,

@@ -1,6 +1,6 @@
 {
 "name": "flowise",
-"version": "1.3.6",
+"version": "1.3.7",
 "description": "Flowiseai Server",
 "main": "dist/index",
 "types": "dist/index.d.ts",

@@ -27,6 +27,7 @@ export default class Start extends Command {
 LOG_LEVEL: Flags.string(),
 TOOL_FUNCTION_BUILTIN_DEP: Flags.string(),
 TOOL_FUNCTION_EXTERNAL_DEP: Flags.string(),
+NUMBER_OF_PROXIES: Flags.string(),
 DATABASE_TYPE: Flags.string(),
 DATABASE_PATH: Flags.string(),
 DATABASE_PORT: Flags.string(),
@@ -72,6 +73,7 @@ export default class Start extends Command {
 
 if (flags.PORT) process.env.PORT = flags.PORT
 if (flags.DEBUG) process.env.DEBUG = flags.DEBUG
+if (flags.NUMBER_OF_PROXIES) process.env.NUMBER_OF_PROXIES = flags.NUMBER_OF_PROXIES
 
 // Authorization
 if (flags.FLOWISE_USERNAME) process.env.FLOWISE_USERNAME = flags.FLOWISE_USERNAME

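NUMBER_OF_PROXIES becomes a recognized start flag and is copied into the environment. Its usual purpose, and an assumption about how the server consumes it, is Express's trust-proxy setting, which express-rate-limit relies on to see the real client IP behind reverse proxies:

    import express from 'express'

    const app = express()
    // Trust exactly N proxy hops so req.ip reflects the client, not the proxy
    if (process.env.NUMBER_OF_PROXIES) {
        app.set('trust proxy', parseInt(process.env.NUMBER_OF_PROXIES, 10))
    }
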
@@ -477,6 +477,7 @@ export const replaceInputsWithConfig = (flowNodeData: INodeData, overrideConfig:
 */
 export const isStartNodeDependOnInput = (startingNodes: IReactFlowNode[], nodes: IReactFlowNode[]): boolean => {
 for (const node of startingNodes) {
+if (node.data.category === 'Cache') return true
 for (const inputName in node.data.inputs) {
 const inputVariables = getInputVariables(node.data.inputs[inputName])
 if (inputVariables.length > 0) return true

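The added check makes any flow whose starting nodes include a Cache-category node count as input-dependent. Judging by the function's name, the effect (the call site below is an assumed illustration, not the actual source) is that such flows get re-initialized per request rather than served from the pooled instance, so a connected BaseCache is resolved fresh each time:

    // Assumed call site, for illustration only
    if (isStartNodeDependOnInput(startingNodes, nodes)) {
        // rebuild the ending node instead of reusing the cached flow object
    }
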
@@ -12,7 +12,7 @@ async function addRateLimiter(id: string, duration: number, limit: number, messa
 rateLimiters[id] = rateLimit({
 windowMs: duration * 1000,
 max: limit,
-handler: (req, res) => {
+handler: (_, res) => {
 res.status(429).send(message)
 }
 })
@@ -33,15 +33,19 @@ export function getRateLimiter(req: Request, res: Response, next: NextFunction)
 
 export async function createRateLimiter(chatFlow: IChatFlow) {
 if (!chatFlow.apiConfig) return
-const apiConfig: any = JSON.parse(chatFlow.apiConfig)
+const apiConfig = JSON.parse(chatFlow.apiConfig)
 
 const rateLimit: { limitDuration: number; limitMax: number; limitMsg: string } = apiConfig.rateLimit
 if (!rateLimit) return
 
 const { limitDuration, limitMax, limitMsg } = rateLimit
 if (limitMax && limitDuration && limitMsg) await addRateLimiter(chatFlow.id, limitDuration, limitMax, limitMsg)
 }
 
 export async function initializeRateLimiter(chatFlowPool: IChatFlow[]) {
-await chatFlowPool.map(async (chatFlow) => {
-await createRateLimiter(chatFlow)
-})
+await Promise.all(
+chatFlowPool.map(async (chatFlow) => {
+await createRateLimiter(chatFlow)
+})
+)
 }

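The `initializeRateLimiter` fix addresses a classic async pitfall: `Array.prototype.map` with an async callback returns an array of promises, and `await` on a plain array is a no-op, so the old code could return before any limiter was registered. A self-contained illustration:

    const wait = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms))

    const demo = async () => {
        const jobs = [10, 20, 30]
        // Broken pattern: map returns Promise<void>[]; awaiting the array
        // resolves immediately while the timers are still pending
        await jobs.map((ms) => wait(ms))
        // Fixed pattern: Promise.all resolves only after every job settles
        await Promise.all(jobs.map((ms) => wait(ms)))
    }

The `(req, res)` to `(_, res)` change in the handler simply marks the request parameter as intentionally unused.
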
@@ -1,6 +1,6 @@
 {
 "name": "flowise-ui",
-"version": "1.3.4",
+"version": "1.3.5",
 "license": "SEE LICENSE IN LICENSE.md",
 "homepage": "https://flowiseai.com",
 "author": {

@@ -136,7 +136,7 @@ const Configuration = () => {
 <TooltipWithParser
 style={{ mb: 1, mt: 2, marginLeft: 10 }}
 title={
-'Visit <a target="_blank" href="https://docs.flowiseai.com/deployment#rate-limit-setup-guide">Rate Limit Setup Guide</a> to set up Rate Limit correctly in your hosting environment.'
+'Visit <a target="_blank" href="https://docs.flowiseai.com/rate-limit">Rate Limit Setup Guide</a> to set up Rate Limit correctly in your hosting environment.'
 }
 />
 </Typography>

@@ -64,27 +64,10 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
 window.open(data, '_blank')
 }
 
-const handleVectaraMetadata = (message) => {
-if (message.sourceDocuments && message.sourceDocuments[0].metadata.length)
-message.sourceDocuments = message.sourceDocuments.map((docs) => {
-const newMetadata = docs.metadata.reduce((newMetadata, metadata) => {
-newMetadata[metadata.name] = metadata.value
-return newMetadata
-}, {})
-return {
-pageContent: docs.pageContent,
-metadata: newMetadata
-}
-})
-return message
-}
-
 const removeDuplicateURL = (message) => {
 const visitedURLs = []
 const newSourceDocuments = []
 
-message = handleVectaraMetadata(message)
-
 message.sourceDocuments.forEach((source) => {
 if (isValidURL(source.metadata.source) && !visitedURLs.includes(source.metadata.source)) {
 visitedURLs.push(source.metadata.source)
@@ -174,8 +157,6 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
 if (response.data) {
 let data = response.data
 
-data = handleVectaraMetadata(data)
-
 if (typeof data === 'object' && data.text && data.sourceDocuments) {
 if (!isChatFlowAvailableToStream) {
 setMessages((prevMessages) => [

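For reference, the deleted `handleVectaraMetadata` flattened Vectara's metadata, which arrives as an array of `{ name, value }` pairs, into a plain object before URL de-duplication; presumably that normalization now happens upstream of the UI. The transformation it performed:

    // Vectara-style metadata pairs, flattened to an object keyed by name
    const pairs = [{ name: 'source', value: 'https://example.com' }]
    const metadata = pairs.reduce((acc: Record<string, string>, m) => {
        acc[m.name] = m.value
        return acc
    }, {})
    // metadata => { source: 'https://example.com' }
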
@@ -175,19 +175,8 @@ const Marketplace = () => {
 )}
 </TabPanel>
 ))}
-{!isChatflowsLoading && (!getAllChatflowsMarketplacesApi.data || getAllChatflowsMarketplacesApi.data.length === 0) && (
-<Stack sx={{ alignItems: 'center', justifyContent: 'center' }} flexDirection='column'>
-<Box sx={{ p: 2, height: 'auto' }}>
-<img
-style={{ objectFit: 'cover', height: '30vh', width: 'auto' }}
-src={WorkflowEmptySVG}
-alt='WorkflowEmptySVG'
-/>
-</Box>
-<div>No Marketplace Yet</div>
-</Stack>
-)}
-{!isToolsLoading && (!getAllToolsMarketplacesApi.data || getAllToolsMarketplacesApi.data.length === 0) && (
+{((!isChatflowsLoading && (!getAllChatflowsMarketplacesApi.data || getAllChatflowsMarketplacesApi.data.length === 0)) ||
+(!isToolsLoading && (!getAllToolsMarketplacesApi.data || getAllToolsMarketplacesApi.data.length === 0))) && (
 <Stack sx={{ alignItems: 'center', justifyContent: 'center' }} flexDirection='column'>
 <Box sx={{ p: 2, height: 'auto' }}>
 <img