Merge branch 'FlowiseAI:main' into patch-6

commit 5f7efd3b83

@@ -6,6 +6,8 @@ import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '..
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class Airtable_Agents implements INode {
     label: string

@@ -22,7 +24,7 @@ class Airtable_Agents implements INode {
     constructor() {
         this.label = 'Airtable Agent'
         this.name = 'airtableAgent'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'airtable.svg'

@@ -71,6 +73,14 @@ class Airtable_Agents implements INode {
                 default: 100,
                 additionalParams: true,
                 description: 'Number of results to return'
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
         ]
     }

@@ -80,12 +90,24 @@ class Airtable_Agents implements INode {
         return undefined
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
         const model = nodeData.inputs?.model as BaseLanguageModel
         const baseId = nodeData.inputs?.baseId as string
         const tableId = nodeData.inputs?.tableId as string
         const returnAll = nodeData.inputs?.returnAll as boolean
         const limit = nodeData.inputs?.limit as string
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Airtable agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
 
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const accessToken = getCredentialParam('accessToken', credentialData, nodeData)

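Note: every agent and chain touched by this commit gains the same moderation gate at the top of run(). A minimal standalone sketch of the pattern, assuming (from the call sites in this diff) that checkInputs resolves to the possibly-rewritten input and throws when a moderation chain flags it, and that formatResponse wraps an error message into the string | object shape run() now returns:

    import { checkInputs, Moderation } from '../../moderation/Moderation'
    import { formatResponse } from '../../outputparsers/OutputParserHelpers'

    // Returns the moderated input, or a formatted error response when the input is rejected.
    async function gateInput(moderations: Moderation[] | undefined, input: string): Promise<string | object> {
        if (moderations && moderations.length > 0) {
            try {
                return await checkInputs(moderations, input)
            } catch (e) {
                // small delay so the client UI settles before receiving the rejection
                await new Promise((resolve) => setTimeout(resolve, 500))
                return formatResponse(e.message)
            }
        }
        return input
    }

In the agent nodes the streamResponse call inside the catch is left commented out; the chain nodes later in this commit keep it active.
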
@@ -7,6 +7,8 @@ import { PromptTemplate } from '@langchain/core/prompts'
 import { AutoGPT } from 'langchain/experimental/autogpt'
 import { LLMChain } from 'langchain/chains'
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 type ObjectTool = StructuredTool
 const FINISH_NAME = 'finish'

@@ -25,7 +27,7 @@ class AutoGPT_Agents implements INode {
     constructor() {
         this.label = 'AutoGPT'
         this.name = 'autoGPT'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AutoGPT'
         this.category = 'Agents'
         this.icon = 'autogpt.svg'

@@ -68,6 +70,14 @@ class AutoGPT_Agents implements INode {
                 type: 'number',
                 default: 5,
                 optional: true
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
         ]
     }

@@ -92,9 +102,21 @@ class AutoGPT_Agents implements INode {
         return autogpt
     }
 
-    async run(nodeData: INodeData, input: string): Promise<string> {
+    async run(nodeData: INodeData, input: string): Promise<string | object> {
         const executor = nodeData.instance as AutoGPT
         const model = nodeData.inputs?.model as BaseChatModel
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the AutoGPT agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
 
         try {
             let totalAssistantReply = ''

@@ -2,6 +2,8 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
 import { VectorStore } from '@langchain/core/vectorstores'
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { BabyAGI } from './core'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class BabyAGI_Agents implements INode {
     label: string

@@ -17,7 +19,7 @@ class BabyAGI_Agents implements INode {
     constructor() {
         this.label = 'BabyAGI'
         this.name = 'babyAGI'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'BabyAGI'
         this.category = 'Agents'
         this.icon = 'babyagi.svg'

@@ -39,6 +41,14 @@ class BabyAGI_Agents implements INode {
                 name: 'taskLoop',
                 type: 'number',
                 default: 3
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
         ]
     }

@@ -53,8 +63,21 @@ class BabyAGI_Agents implements INode {
         return babyAgi
     }
 
-    async run(nodeData: INodeData, input: string): Promise<string> {
+    async run(nodeData: INodeData, input: string): Promise<string | object> {
         const executor = nodeData.instance as BabyAGI
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the BabyAGI agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
+
         const objective = input
 
         const res = await executor.call({ objective })

@@ -5,6 +5,8 @@ import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from
 import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
 import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class CSV_Agents implements INode {
     label: string

@@ -20,7 +22,7 @@ class CSV_Agents implements INode {
     constructor() {
         this.label = 'CSV Agent'
         this.name = 'csvAgent'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'CSVagent.svg'

@@ -47,6 +49,14 @@ class CSV_Agents implements INode {
                 optional: true,
                 placeholder:
                     'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.'
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
         ]
     }

@@ -56,10 +66,22 @@ class CSV_Agents implements INode {
         return undefined
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
         const csvFileBase64 = nodeData.inputs?.csvFile as string
         const model = nodeData.inputs?.model as BaseLanguageModel
         const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the CSV agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
 
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)

@@ -4,15 +4,16 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
 import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'
 import { ChainValues } from '@langchain/core/utils/types'
 import { AgentStep } from '@langchain/core/agents'
-import { renderTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
+import { renderTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
 import { RunnableSequence } from '@langchain/core/runnables'
 import { ChatConversationalAgent } from 'langchain/agents'
 import { getBaseClasses } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
-import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { IVisionChatModal, FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { AgentExecutor } from '../../../src/agents'
 import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
-import { addImagesToMessages } from '../../../src/multiModalUtils'
+import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
 

@@ -46,7 +47,7 @@ class ConversationalAgent_Agents implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversational Agent'
         this.name = 'conversationalAgent'
-        this.version = 2.0
+        this.version = 3.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'agent.svg'

@@ -77,6 +78,14 @@ class ConversationalAgent_Agents implements INode {
                 default: DEFAULT_PREFIX,
                 optional: true,
                 additionalParams: true
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
         ]
         this.sessionId = fields?.sessionId

@@ -86,9 +95,20 @@ class ConversationalAgent_Agents implements INode {
         return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Conversational Agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         const executor = await prepareAgent(
             nodeData,
             options,

@@ -150,33 +170,32 @@ const prepareAgent = async (
         outputParser
     })
 
-    if (model instanceof ChatOpenAI) {
-        let humanImageMessages: HumanMessage[] = []
+    if (llmSupportsVision(model)) {
+        const visionChatModel = model as IVisionChatModal
         const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
 
         if (messageContent?.length) {
-            // Change model to gpt-4-vision
-            model.modelName = 'gpt-4-vision-preview'
-
-            // Change default max token to higher when using gpt-4-vision
-            model.maxTokens = 1024
-
-            for (const msg of messageContent) {
-                humanImageMessages.push(new HumanMessage({ content: [msg] }))
-            }
+            visionChatModel.setVisionModel()
 
             // Pop the `agent_scratchpad` MessagePlaceHolder
             let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
 
-            // Add the HumanMessage for images
-            prompt.promptMessages.push(...humanImageMessages)
+            if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
+                const lastMessage = prompt.promptMessages.pop() as HumanMessagePromptTemplate
+                const template = (lastMessage.prompt as PromptTemplate).template as string
+                const msg = HumanMessagePromptTemplate.fromTemplate([
+                    ...messageContent,
+                    {
+                        text: template
+                    }
+                ])
+                msg.inputVariables = lastMessage.inputVariables
+                prompt.promptMessages.push(msg)
+            }
 
             // Add the `agent_scratchpad` MessagePlaceHolder back
             prompt.promptMessages.push(messagePlaceholder)
         } else {
-            // revert to previous values if image upload is empty
-            model.modelName = model.configuredModel
-            model.maxTokens = model.configuredMaxToken
+            visionChatModel.revertToOriginalModel()
        }
    }

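Note: the vision hunks above replace the hard-coded ChatOpenAI / gpt-4-vision-preview switch with a model-agnostic check. The IVisionChatModal interface itself is not shown in this diff; the following is a hedged sketch of the contract implied by the call sites (setVisionModel, revertToOriginalModel, multiModalOption), with the exact member types left as assumptions:

    import { llmSupportsVision } from '../../../src/multiModalUtils'

    // Sketch only: the real interface lives in ../../../src/Interface.
    interface IVisionChatModal {
        multiModalOption: any // upload settings passed to addImagesToMessages (exact type assumed)
        setVisionModel(): void // swap to the provider's vision-capable model (and a larger token budget)
        revertToOriginalModel(): void // restore the user-configured model when no image was uploaded
    }

    // The gating logic the nodes now repeat, factored out for clarity:
    function applyVisionMode(model: any, hasImages: boolean): void {
        if (!llmSupportsVision(model)) return
        const visionChatModel = model as IVisionChatModal
        if (hasImages) visionChatModel.setVisionModel()
        else visionChatModel.revertToOriginalModel()
    }
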
@@ -10,6 +10,8 @@ import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams }
 import { getBaseClasses } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 const defaultMessage = `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.`
 

@@ -28,7 +30,7 @@ class ConversationalRetrievalAgent_Agents implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversational Retrieval Agent'
         this.name = 'conversationalRetrievalAgent'
-        this.version = 3.0
+        this.version = 4.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'agent.svg'

@@ -59,6 +61,14 @@ class ConversationalRetrievalAgent_Agents implements INode {
                 rows: 4,
                 optional: true,
                 additionalParams: true
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
         ]
         this.sessionId = fields?.sessionId

@@ -68,8 +78,21 @@ class ConversationalRetrievalAgent_Agents implements INode {
         return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Conversational Retrieval Agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
+
         const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
 
         const loggerHandler = new ConsoleCallbackHandler(options.logger)

@@ -1,17 +1,17 @@
 import { flatten } from 'lodash'
 import { AgentExecutor } from 'langchain/agents'
-import { pull } from 'langchain/hub'
+import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts'
 import { Tool } from '@langchain/core/tools'
 import type { PromptTemplate } from '@langchain/core/prompts'
 import { BaseChatModel } from '@langchain/core/language_models/chat_models'
+import { pull } from 'langchain/hub'
 import { additionalCallbacks } from '../../../src/handler'
-import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { IVisionChatModal, FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
 import { createReactAgent } from '../../../src/agents'
 import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
 import { HumanMessage } from '@langchain/core/messages'
-import { addImagesToMessages } from '../../../src/multiModalUtils'
-import { ChatPromptTemplate, HumanMessagePromptTemplate } from 'langchain/prompts'
+import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class MRKLAgentChat_Agents implements INode {
     label: string

@@ -28,7 +28,7 @@ class MRKLAgentChat_Agents implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'ReAct Agent for Chat Models'
         this.name = 'mrklAgentChat'
-        this.version = 3.0
+        this.version = 4.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'agent.svg'

@@ -50,6 +50,14 @@ class MRKLAgentChat_Agents implements INode {
                 label: 'Memory',
                 name: 'memory',
                 type: 'BaseChatMemory'
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
         ]
         this.sessionId = fields?.sessionId

@@ -59,32 +67,47 @@ class MRKLAgentChat_Agents implements INode {
         return null
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
         const model = nodeData.inputs?.model as BaseChatModel
         let tools = nodeData.inputs?.tools as Tool[]
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the ReAct Agent for Chat Models
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         tools = flatten(tools)
 
         const prompt = await pull<PromptTemplate>('hwchase17/react-chat')
         let chatPromptTemplate = undefined
 
-        if (model instanceof ChatOpenAI) {
+        if (llmSupportsVision(model)) {
+            const visionChatModel = model as IVisionChatModal
             const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
 
             if (messageContent?.length) {
-                // Change model to gpt-4-vision
-                model.modelName = 'gpt-4-vision-preview'
-
-                // Change default max token to higher when using gpt-4-vision
-                model.maxTokens = 1024
 
+                // Change model to vision supported
+                visionChatModel.setVisionModel()
                 const oldTemplate = prompt.template as string
-                chatPromptTemplate = ChatPromptTemplate.fromMessages([HumanMessagePromptTemplate.fromTemplate(oldTemplate)])
-                chatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
+
+                const msg = HumanMessagePromptTemplate.fromTemplate([
+                    ...messageContent,
+                    {
+                        text: oldTemplate
+                    }
+                ])
+                msg.inputVariables = prompt.inputVariables
+                chatPromptTemplate = ChatPromptTemplate.fromMessages([msg])
             } else {
-                // revert to previous values if image upload is empty
-                model.modelName = model.configuredModel
-                model.maxTokens = model.configuredMaxToken
+                visionChatModel.revertToOriginalModel()
            }
        }

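Note: the same image-merge pattern now appears in several nodes above: instead of appending images as a separate HumanMessage, the image parts are folded into the last human prompt template so the template text and its input variables survive. Consolidated here from the hunks above into one helper (the helper name is ours, not Flowise's):

    import { ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'

    const mergeImagesIntoLastHumanMessage = (prompt: ChatPromptTemplate, messageContent: any[]): void => {
        if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
            const lastMessage = prompt.promptMessages.pop() as HumanMessagePromptTemplate
            const template = (lastMessage.prompt as PromptTemplate).template as string
            // Rebuild the human message with the image parts first and the original template text last
            const msg = HumanMessagePromptTemplate.fromTemplate([...messageContent, { text: template }])
            // The hunks above carry the variables over explicitly rather than relying on inference
            msg.inputVariables = lastMessage.inputVariables
            prompt.promptMessages.push(msg)
        }
    }
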
@@ -3,11 +3,13 @@ import { AgentExecutor } from 'langchain/agents'
 import { pull } from 'langchain/hub'
 import { Tool } from '@langchain/core/tools'
 import type { PromptTemplate } from '@langchain/core/prompts'
-import { BaseLanguageModel } from 'langchain/base_language'
+import { BaseLanguageModel } from '@langchain/core/language_models/base'
 import { additionalCallbacks } from '../../../src/handler'
 import { getBaseClasses } from '../../../src/utils'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { createReactAgent } from '../../../src/agents'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class MRKLAgentLLM_Agents implements INode {
     label: string

@@ -23,7 +25,7 @@ class MRKLAgentLLM_Agents implements INode {
     constructor() {
         this.label = 'ReAct Agent for LLMs'
         this.name = 'mrklAgentLLM'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'agent.svg'

@@ -40,6 +42,14 @@ class MRKLAgentLLM_Agents implements INode {
                 label: 'Language Model',
                 name: 'model',
                 type: 'BaseLanguageModel'
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
         ]
     }

@@ -48,9 +58,22 @@ class MRKLAgentLLM_Agents implements INode {
         return null
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
         const model = nodeData.inputs?.model as BaseLanguageModel
         let tools = nodeData.inputs?.tools as Tool[]
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the ReAct Agent for LLMs
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
+
         tools = flatten(tools)
 
         const prompt = await pull<PromptTemplate>('hwchase17/react')

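Note: both ReAct nodes pull their base prompt from the LangChain Hub rather than hard-coding it. A minimal sketch of that flow using the public upstream API; the createReactAgent these nodes import from ../../../src/agents is Flowise's own helper, assumed here to mirror the upstream signature:

    import { pull } from 'langchain/hub'
    import type { PromptTemplate } from '@langchain/core/prompts'
    import type { Tool } from '@langchain/core/tools'
    import type { BaseLanguageModel } from '@langchain/core/language_models/base'
    import { AgentExecutor, createReactAgent } from 'langchain/agents'

    // Pull the stock ReAct prompt and wire up an executor.
    async function buildReactExecutor(llm: BaseLanguageModel, tools: Tool[]) {
        const prompt = await pull<PromptTemplate>('hwchase17/react') // the chat node pulls 'hwchase17/react-chat'
        const agent = await createReactAgent({ llm, tools, prompt })
        return new AgentExecutor({ agent, tools })
    }
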
@@ -10,6 +10,8 @@ import { getBaseClasses } from '../../../src/utils'
 import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
+import { Moderation, checkInputs } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class OpenAIFunctionAgent_Agents implements INode {
     label: string

@@ -26,7 +28,7 @@ class OpenAIFunctionAgent_Agents implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'OpenAI Function Agent'
         this.name = 'openAIFunctionAgent'
-        this.version = 3.0
+        this.version = 4.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'function.svg'

@@ -56,6 +58,14 @@ class OpenAIFunctionAgent_Agents implements INode {
                 rows: 4,
                 optional: true,
                 additionalParams: true
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
         ]
         this.sessionId = fields?.sessionId

@@ -67,6 +77,19 @@ class OpenAIFunctionAgent_Agents implements INode {
 
     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the OpenAI Function Agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
+
         const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
 
         const loggerHandler = new ConsoleCallbackHandler(options.logger)

@@ -0,0 +1,224 @@
+import { flatten } from 'lodash'
+import { ChainValues } from '@langchain/core/utils/types'
+import { AgentStep } from '@langchain/core/agents'
+import { BaseChatModel } from '@langchain/core/language_models/chat_models'
+import { RunnableSequence } from '@langchain/core/runnables'
+import { Tool } from '@langchain/core/tools'
+import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
+import { XMLAgentOutputParser } from 'langchain/agents/xml/output_parser'
+import { formatLogToMessage } from 'langchain/agents/format_scratchpad/log_to_message'
+import { getBaseClasses } from '../../../src/utils'
+import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
+import { AgentExecutor } from '../../../src/agents'
+import { Moderation, checkInputs } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
+
+const defaultSystemMessage = `You are a helpful assistant. Help the user answer any questions.
+
+You have access to the following tools:
+
+{tools}
+
+In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
+For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
+
+<tool>search</tool><tool_input>weather in SF</tool_input>
+<observation>64 degrees</observation>
+
+When you are done, respond with a final answer between <final_answer></final_answer>. For example:
+
+<final_answer>The weather in SF is 64 degrees</final_answer>
+
+Begin!
+
+Previous Conversation:
+{chat_history}
+
+Question: {input}
+{agent_scratchpad}`
+
+class XMLAgent_Agents implements INode {
+    label: string
+    name: string
+    version: number
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+    sessionId?: string
+
+    constructor(fields?: { sessionId?: string }) {
+        this.label = 'XML Agent'
+        this.name = 'xmlAgent'
+        this.version = 2.0
+        this.type = 'XMLAgent'
+        this.category = 'Agents'
+        this.icon = 'xmlagent.svg'
+        this.description = `Agent that is designed for LLMs that are good for reasoning/writing XML (e.g: Anthropic Claude)`
+        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
+        this.inputs = [
+            {
+                label: 'Tools',
+                name: 'tools',
+                type: 'Tool',
+                list: true
+            },
+            {
+                label: 'Memory',
+                name: 'memory',
+                type: 'BaseChatMemory'
+            },
+            {
+                label: 'Chat Model',
+                name: 'model',
+                type: 'BaseChatModel'
+            },
+            {
+                label: 'System Message',
+                name: 'systemMessage',
+                type: 'string',
+                warning: 'Prompt must include input variables: {tools}, {chat_history}, {input} and {agent_scratchpad}',
+                rows: 4,
+                default: defaultSystemMessage,
+                additionalParams: true
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
+        ]
+        this.sessionId = fields?.sessionId
+    }
+
+    async init(): Promise<any> {
+        return null
+    }
+
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
+        const memory = nodeData.inputs?.memory as FlowiseMemory
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the XML Agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
+        const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
+
+        const loggerHandler = new ConsoleCallbackHandler(options.logger)
+        const callbacks = await additionalCallbacks(nodeData, options)
+
+        let res: ChainValues = {}
+        let sourceDocuments: ICommonObject[] = []
+
+        if (options.socketIO && options.socketIOClientId) {
+            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
+            res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
+            if (res.sourceDocuments) {
+                options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
+                sourceDocuments = res.sourceDocuments
+            }
+        } else {
+            res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
+            if (res.sourceDocuments) {
+                sourceDocuments = res.sourceDocuments
+            }
+        }
+
+        await memory.addChatMessages(
+            [
+                {
+                    text: input,
+                    type: 'userMessage'
+                },
+                {
+                    text: res?.output,
+                    type: 'apiMessage'
+                }
+            ],
+            this.sessionId
+        )
+
+        return sourceDocuments.length ? { text: res?.output, sourceDocuments: flatten(sourceDocuments) } : res?.output
+    }
+}
+
+const prepareAgent = async (
+    nodeData: INodeData,
+    flowObj: { sessionId?: string; chatId?: string; input?: string },
+    chatHistory: IMessage[] = []
+) => {
+    const model = nodeData.inputs?.model as BaseChatModel
+    const memory = nodeData.inputs?.memory as FlowiseMemory
+    const systemMessage = nodeData.inputs?.systemMessage as string
+    let tools = nodeData.inputs?.tools
+    tools = flatten(tools)
+    const inputKey = memory.inputKey ? memory.inputKey : 'input'
+    const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
+
+    let promptMessage = systemMessage ? systemMessage : defaultSystemMessage
+    if (memory.memoryKey) promptMessage = promptMessage.replaceAll('{chat_history}', `{${memory.memoryKey}}`)
+    if (memory.inputKey) promptMessage = promptMessage.replaceAll('{input}', `{${memory.inputKey}}`)
+
+    const prompt = ChatPromptTemplate.fromMessages([
+        HumanMessagePromptTemplate.fromTemplate(promptMessage),
+        new MessagesPlaceholder('agent_scratchpad')
+    ])
+
+    const missingVariables = ['tools', 'agent_scratchpad'].filter((v) => !prompt.inputVariables.includes(v))
+
+    if (missingVariables.length > 0) {
+        throw new Error(`Provided prompt is missing required input variables: ${JSON.stringify(missingVariables)}`)
+    }
+
+    const llmWithStop = model.bind({ stop: ['</tool_input>', '</final_answer>'] })
+
+    const messages = (await memory.getChatMessages(flowObj.sessionId, false, chatHistory)) as IMessage[]
+    let chatHistoryMsgTxt = ''
+    for (const message of messages) {
+        if (message.type === 'apiMessage') {
+            chatHistoryMsgTxt += `\\nAI:${message.message}`
+        } else if (message.type === 'userMessage') {
+            chatHistoryMsgTxt += `\\nHuman:${message.message}`
+        }
+    }
+
+    const runnableAgent = RunnableSequence.from([
+        {
+            [inputKey]: (i: { input: string; tools: Tool[]; steps: AgentStep[] }) => i.input,
+            agent_scratchpad: (i: { input: string; tools: Tool[]; steps: AgentStep[] }) => formatLogToMessage(i.steps),
+            tools: (_: { input: string; tools: Tool[]; steps: AgentStep[] }) =>
+                tools.map((tool: Tool) => `${tool.name}: ${tool.description}`),
+            [memoryKey]: (_: { input: string; tools: Tool[]; steps: AgentStep[] }) => chatHistoryMsgTxt
+        },
+        prompt,
+        llmWithStop,
+        new XMLAgentOutputParser()
+    ])
+
+    const executor = AgentExecutor.fromAgentAndTools({
+        agent: runnableAgent,
+        tools,
+        sessionId: flowObj?.sessionId,
+        chatId: flowObj?.chatId,
+        input: flowObj?.input,
+        isXML: true,
+        verbose: process.env.DEBUG === 'true' ? true : false
+    })
+
+    return executor
+}
+
+module.exports = { nodeClass: XMLAgent_Agents }

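Note: the stop sequences bound in prepareAgent above (`</tool_input>`, `</final_answer>`) are what drive the XML protocol: generation halts right after the model names a tool input or a final answer, and XMLAgentOutputParser turns the text into an agent action or a finish. An illustrative single turn, with example values taken from the default system prompt:

    // One turn of the XML tool loop (illustrative values only).
    // The bound stop token cuts generation here:
    const modelOutput = '<tool>search</tool><tool_input>weather in SF'
    // XMLAgentOutputParser reads this as an action roughly of the form { tool: 'search', toolInput: 'weather in SF' }.
    // The executor runs the tool and the scratchpad grows for the next turn:
    const scratchpad = '<tool>search</tool><tool_input>weather in SF</tool_input><observation>64 degrees</observation>'
    // With the observation in context, the model can now finish (stopped at </final_answer>):
    const nextOutput = '<final_answer>The weather in SF is 64 degrees'
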
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-file-type-xml" width="24" height="24" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M14 3v4a1 1 0 0 0 1 1h4" /><path d="M5 12v-7a2 2 0 0 1 2 -2h7l5 5v4" /><path d="M4 15l4 6" /><path d="M4 21l4 -6" /><path d="M19 15v6h3" /><path d="M11 21v-6l2.5 3l2.5 -3v6" /></svg>

@@ -3,6 +3,8 @@ import { APIChain, createOpenAPIChain } from 'langchain/chains'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
+import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class OpenApiChain_Chains implements INode {
     label: string

@@ -18,7 +20,7 @@ class OpenApiChain_Chains implements INode {
     constructor() {
         this.label = 'OpenAPI Chain'
         this.name = 'openApiChain'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'OpenAPIChain'
         this.icon = 'openapi.svg'
         this.category = 'Chains'

@@ -50,6 +52,14 @@ class OpenApiChain_Chains implements INode {
                 type: 'json',
                 additionalParams: true,
                 optional: true
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
         ]
     }

@@ -58,11 +68,21 @@ class OpenApiChain_Chains implements INode {
         return await initChain(nodeData)
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
         const chain = await initChain(nodeData)
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const callbacks = await additionalCallbacks(nodeData, options)
 
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the OpenAPI chain
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
        if (options.socketIO && options.socketIOClientId) {
             const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
             const res = await chain.run(input, [loggerHandler, handler, ...callbacks])

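Note: unlike the agent nodes earlier in this commit, the chain nodes (this one and the ones below) keep streamResponse active rather than commented out, so a moderation rejection is streamed to the connected socket before the formatted error is returned. A hedged sketch of that chain-side rejection path; streamResponse's parameter semantics are taken from the call sites above, not from its definition:

    import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
    import { formatResponse } from '../../outputparsers/OutputParserHelpers'
    import { ICommonObject } from '../../../src/Interface'

    async function moderateOrStream(moderations: Moderation[], input: string, options: ICommonObject): Promise<string | object> {
        try {
            return await checkInputs(moderations, input)
        } catch (e) {
            await new Promise((resolve) => setTimeout(resolve, 500))
            // push the rejection down the socket when one is attached, then return it
            streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
            return formatResponse(e.message)
        }
    }
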
@@ -1,14 +1,30 @@
 import { ConversationChain } from 'langchain/chains'
-import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from '@langchain/core/prompts'
+import {
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    MessagesPlaceholder,
+    SystemMessagePromptTemplate,
+    BaseMessagePromptTemplateLike,
+    PromptTemplate
+} from '@langchain/core/prompts'
 import { RunnableSequence } from '@langchain/core/runnables'
 import { StringOutputParser } from '@langchain/core/output_parsers'
 import { BaseChatModel } from '@langchain/core/language_models/chat_models'
 import { HumanMessage } from '@langchain/core/messages'
 import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
+import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
-import { addImagesToMessages } from '../../../src/multiModalUtils'
+import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
 import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
-import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import {
+    IVisionChatModal,
+    FlowiseMemory,
+    ICommonObject,
+    INode,
+    INodeData,
+    INodeParams,
+    MessageContentImageUrl
+} from '../../../src/Interface'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
 

@@ -145,16 +161,32 @@ class ConversationChain_Chains implements INode {
         }
     }
 }
 
-const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage[]) => {
+const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: MessageContentImageUrl[]) => {
     const memory = nodeData.inputs?.memory as FlowiseMemory
     const prompt = nodeData.inputs?.systemMessagePrompt as string
     const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate
+    let model = nodeData.inputs?.model as BaseChatModel
 
     if (chatPromptTemplate && chatPromptTemplate.promptMessages.length) {
         const sysPrompt = chatPromptTemplate.promptMessages[0]
         const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1]
         const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt]
-        if (humanImageMessages.length) messages.push(...humanImageMessages)
 
+        // OpenAI works better when images are separated into standalone human messages
+        if (model instanceof ChatOpenAI && humanImageMessages.length) {
+            messages.push(new HumanMessage({ content: [...humanImageMessages] }))
+        } else if (humanImageMessages.length) {
+            const lastMessage = messages.pop() as HumanMessagePromptTemplate
+            const template = (lastMessage.prompt as PromptTemplate).template as string
+            const msg = HumanMessagePromptTemplate.fromTemplate([
+                ...humanImageMessages,
+                {
+                    text: template
+                }
+            ])
+            msg.inputVariables = lastMessage.inputVariables
+            messages.push(msg)
+        }
 
         const chatPrompt = ChatPromptTemplate.fromMessages(messages)
         if ((chatPromptTemplate as any).promptValues) {

@@ -165,12 +197,18 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessage
         return chatPrompt
     }
 
-    const messages = [
+    const messages: BaseMessagePromptTemplateLike[] = [
         SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage),
-        new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
-        HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)
+        new MessagesPlaceholder(memory.memoryKey ?? 'chat_history')
     ]
-    if (humanImageMessages.length) messages.push(...(humanImageMessages as any[]))
 
+    // OpenAI works better when images are separated into standalone human messages
+    if (model instanceof ChatOpenAI && humanImageMessages.length) {
+        messages.push(HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`))
+        messages.push(new HumanMessage({ content: [...humanImageMessages] }))
+    } else if (humanImageMessages.length) {
+        messages.push(HumanMessagePromptTemplate.fromTemplate([`{${inputKey}}`, ...humanImageMessages]))
+    }
 
     const chatPrompt = ChatPromptTemplate.fromMessages(messages)
 

@@ -179,32 +217,23 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessage
 
 const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => {
     const chatHistory = options.chatHistory
-    let model = nodeData.inputs?.model as ChatOpenAI
+    let model = nodeData.inputs?.model as BaseChatModel
     const memory = nodeData.inputs?.memory as FlowiseMemory
     const memoryKey = memory.memoryKey ?? 'chat_history'
 
-    let humanImageMessages: HumanMessage[] = []
-    if (model instanceof ChatOpenAI) {
-        const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
-
+    let messageContent: MessageContentImageUrl[] = []
+    if (llmSupportsVision(model)) {
+        messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
+        const visionChatModel = model as IVisionChatModal
        if (messageContent?.length) {
-            // Change model to gpt-4-vision
-            model.modelName = 'gpt-4-vision-preview'
-
-            // Change default max token to higher when using gpt-4-vision
-            model.maxTokens = 1024
-
-            for (const msg of messageContent) {
-                humanImageMessages.push(new HumanMessage({ content: [msg] }))
-            }
+            visionChatModel.setVisionModel()
         } else {
             // revert to previous values if image upload is empty
-            model.modelName = model.configuredModel
-            model.maxTokens = model.configuredMaxToken
+            visionChatModel.revertToOriginalModel()
        }
    }
 
-    const chatPrompt = prepareChatPrompt(nodeData, humanImageMessages)
+    const chatPrompt = prepareChatPrompt(nodeData, messageContent)
     let promptVariables = {}
     const promptValuesRaw = (chatPrompt as any).promptValues
     if (promptValuesRaw) {

@@ -228,7 +257,7 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s
             },
             ...promptVariables
         },
-        prepareChatPrompt(nodeData, humanImageMessages),
+        prepareChatPrompt(nodeData, messageContent),
         model,
         new StringOutputParser()
     ])

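Note: prepareChatPrompt now receives the raw MessageContentImageUrl[] parts and branches per model family when attaching them. A condensed view of that decision, assembled from the two hunks above (per the in-code comment, OpenAI responds better when images arrive as a standalone human message; other vision models get the images folded into the templated human message):

    // humanImageMessages: MessageContentImageUrl[] produced by addImagesToMessages
    if (model instanceof ChatOpenAI && humanImageMessages.length) {
        // keep the templated text, then append the images as their own human message
        messages.push(HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`))
        messages.push(new HumanMessage({ content: [...humanImageMessages] }))
    } else if (humanImageMessages.length) {
        // fold the images into the templated human message itself
        messages.push(HumanMessagePromptTemplate.fromTemplate([`{${inputKey}}`, ...humanImageMessages]))
    }
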
@@ -5,6 +5,8 @@ import { PromptTemplate, ChatPromptTemplate, MessagesPlaceholder } from '@langch
 import { Runnable, RunnableSequence, RunnableMap, RunnableBranch, RunnableLambda } from '@langchain/core/runnables'
 import { BaseMessage, HumanMessage, AIMessage } from '@langchain/core/messages'
 import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
+import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 import { StringOutputParser } from '@langchain/core/output_parsers'
 import type { Document } from '@langchain/core/documents'
 import { BufferMemoryInput } from 'langchain/memory'

@@ -36,7 +38,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversational Retrieval QA Chain'
         this.name = 'conversationalRetrievalQAChain'
-        this.version = 2.0
+        this.version = 3.0
         this.type = 'ConversationalRetrievalQAChain'
         this.icon = 'qa.svg'
         this.category = 'Chains'

@@ -87,6 +89,14 @@ class ConversationalRetrievalQAChain_Chains implements INode {
                 additionalParams: true,
                 optional: true,
                 default: RESPONSE_TEMPLATE
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            }
             /** Deprecated
             {

@@ -163,6 +173,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
         }
 
         let memory: FlowiseMemory | undefined = externalMemory
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
         if (!memory) {
             memory = new BufferMemory({
                 returnMessages: true,

@@ -171,6 +182,16 @@ class ConversationalRetrievalQAChain_Chains implements INode {
             })
         }
 
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Conversational Retrieval QA Chain
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt)
 
         const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? []

@@ -1,16 +1,15 @@
 import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
 import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers'
-import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts'
+import { HumanMessage } from '@langchain/core/messages'
+import { ChatPromptTemplate, FewShotPromptTemplate, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
 import { OutputFixingParser } from 'langchain/output_parsers'
 import { LLMChain } from 'langchain/chains'
-import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
-import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
+import { IVisionChatModal, ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
+import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from '../../../src/handler'
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
 import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
 import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
 import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
-import { addImagesToMessages } from '../../../src/multiModalUtils'
-import { HumanMessage } from 'langchain/schema'
+import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
 
 class LLMChain_Chains implements INode {
     label: string

@@ -164,7 +163,6 @@ const runPrediction = async (
     const socketIO = isStreaming ? options.socketIO : undefined
     const socketIOClientId = isStreaming ? options.socketIOClientId : ''
     const moderations = nodeData.inputs?.inputModeration as Moderation[]
-    let model = nodeData.inputs?.model as ChatOpenAI
 
     if (moderations && moderations.length > 0) {
         try {

@@ -183,24 +181,39 @@ const runPrediction = async (
      * TO: { "value": "hello i am ben\n\n\thow are you?" }
      */
     const promptValues = handleEscapeCharacters(promptValuesRaw, true)
-    const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
 
-    if (chain.llm instanceof ChatOpenAI) {
-        const chatOpenAI = chain.llm as ChatOpenAI
+    if (llmSupportsVision(chain.llm)) {
+        const visionChatModel = chain.llm as IVisionChatModal
+        const messageContent = addImagesToMessages(nodeData, options, visionChatModel.multiModalOption)
         if (messageContent?.length) {
-            // Change model to gpt-4-vision && max token to higher when using gpt-4-vision
-            chatOpenAI.modelName = 'gpt-4-vision-preview'
-            chatOpenAI.maxTokens = 1024
+            visionChatModel.setVisionModel()
             // Add image to the message
             if (chain.prompt instanceof PromptTemplate) {
                 const existingPromptTemplate = chain.prompt.template as string
-                let newChatPromptTemplate = ChatPromptTemplate.fromMessages([
-                    HumanMessagePromptTemplate.fromTemplate(existingPromptTemplate)
+                const msg = HumanMessagePromptTemplate.fromTemplate([
+                    ...messageContent,
+                    {
+                        text: existingPromptTemplate
+                    }
                 ])
-                newChatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
-                chain.prompt = newChatPromptTemplate
+                msg.inputVariables = chain.prompt.inputVariables
+                chain.prompt = ChatPromptTemplate.fromMessages([msg])
             } else if (chain.prompt instanceof ChatPromptTemplate) {
-                chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent }))
+                if (chain.prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
+                    const lastMessage = chain.prompt.promptMessages.pop() as HumanMessagePromptTemplate
+                    const template = (lastMessage.prompt as PromptTemplate).template as string
+                    const msg = HumanMessagePromptTemplate.fromTemplate([
+                        ...messageContent,
+                        {
+                            text: template
+                        }
+                    ])
+                    msg.inputVariables = lastMessage.inputVariables
+                    chain.prompt.promptMessages.push(msg)
+                } else {
+                    chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent }))
+                }
             } else if (chain.prompt instanceof FewShotPromptTemplate) {
                 let existingFewShotPromptTemplate = chain.prompt.examplePrompt.template as string
                 let newFewShotPromptTemplate = ChatPromptTemplate.fromMessages([

@@ -212,8 +225,7 @@ const runPrediction = async (
             }
         } else {
             // revert to previous values if image upload is empty
-            chatOpenAI.modelName = model.configuredModel
-            chatOpenAI.maxTokens = model.configuredMaxToken
+            visionChatModel.revertToOriginalModel()
         }
     }
 

@@ -3,6 +3,8 @@ import { MultiPromptChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeParams, PromptRetriever } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

class MultiPromptChain_Chains implements INode {
    label: string

@@ -18,7 +20,7 @@ class MultiPromptChain_Chains implements INode {
    constructor() {
        this.label = 'Multi Prompt Chain'
        this.name = 'multiPromptChain'
        this.version = 1.0
        this.version = 2.0
        this.type = 'MultiPromptChain'
        this.icon = 'prompt.svg'
        this.category = 'Chains'

@@ -35,6 +37,14 @@ class MultiPromptChain_Chains implements INode {
                name: 'promptRetriever',
                type: 'PromptRetriever',
                list: true
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            }
        ]
    }

@@ -62,8 +72,19 @@ class MultiPromptChain_Chains implements INode {
        return chain
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
        const chain = nodeData.instance as MultiPromptChain
        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the Multi Prompt Chain
                input = await checkInputs(moderations, input)
            } catch (e) {
                await new Promise((resolve) => setTimeout(resolve, 500))
                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }
        const obj = { input }

        const loggerHandler = new ConsoleCallbackHandler(options.logger)
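The run() guard above is the moderation pattern this commit threads through every chain and agent below. A standalone sketch of the idea (runModerated is a hypothetical helper, not part of the commit):

    import { checkInputs, Moderation } from '../../moderation/Moderation'
    import { formatResponse } from '../../outputparsers/OutputParserHelpers'

    // Run all configured moderation chains first; if one rejects the input,
    // return the error text instead of ever calling the language model.
    const runModerated = async (
        moderations: Moderation[] | undefined,
        input: string,
        invoke: (safeInput: string) => Promise<string | object>
    ): Promise<string | object> => {
        if (moderations && moderations.length > 0) {
            try {
                input = await checkInputs(moderations, input)
            } catch (e: any) {
                return formatResponse(e.message)
            }
        }
        return invoke(input)
    }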
@@ -3,6 +3,8 @@ import { MultiRetrievalQAChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeParams, VectorStoreRetriever } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

class MultiRetrievalQAChain_Chains implements INode {
    label: string

@@ -18,7 +20,7 @@ class MultiRetrievalQAChain_Chains implements INode {
    constructor() {
        this.label = 'Multi Retrieval QA Chain'
        this.name = 'multiRetrievalQAChain'
        this.version = 1.0
        this.version = 2.0
        this.type = 'MultiRetrievalQAChain'
        this.icon = 'qa.svg'
        this.category = 'Chains'

@@ -41,6 +43,14 @@ class MultiRetrievalQAChain_Chains implements INode {
                name: 'returnSourceDocuments',
                type: 'boolean',
                optional: true
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            }
        ]
    }

@@ -72,7 +82,17 @@ class MultiRetrievalQAChain_Chains implements INode {
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
        const chain = nodeData.instance as MultiRetrievalQAChain
        const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean

        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the Multi Retrieval QA Chain
                input = await checkInputs(moderations, input)
            } catch (e) {
                await new Promise((resolve) => setTimeout(resolve, 500))
                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }
        const obj = { input }
        const loggerHandler = new ConsoleCallbackHandler(options.logger)
        const callbacks = await additionalCallbacks(nodeData, options)
@@ -4,6 +4,8 @@ import { RetrievalQAChain } from 'langchain/chains'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

class RetrievalQAChain_Chains implements INode {
    label: string

@@ -19,7 +21,7 @@ class RetrievalQAChain_Chains implements INode {
    constructor() {
        this.label = 'Retrieval QA Chain'
        this.name = 'retrievalQAChain'
        this.version = 1.0
        this.version = 2.0
        this.type = 'RetrievalQAChain'
        this.icon = 'qa.svg'
        this.category = 'Chains'

@@ -35,6 +37,14 @@ class RetrievalQAChain_Chains implements INode {
                label: 'Vector Store Retriever',
                name: 'vectorStoreRetriever',
                type: 'BaseRetriever'
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            }
        ]
    }

@@ -47,8 +57,19 @@ class RetrievalQAChain_Chains implements INode {
        return chain
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
        const chain = nodeData.instance as RetrievalQAChain
        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the Retrieval QA Chain
                input = await checkInputs(moderations, input)
            } catch (e) {
                await new Promise((resolve) => setTimeout(resolve, 500))
                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }
        const obj = {
            query: input
        }
@@ -7,6 +7,8 @@ import { SqlDatabase } from 'langchain/sql_db'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { getBaseClasses, getInputVariables } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

type DatabaseType = 'sqlite' | 'postgres' | 'mssql' | 'mysql'

@@ -24,7 +26,7 @@ class SqlDatabaseChain_Chains implements INode {
    constructor() {
        this.label = 'Sql Database Chain'
        this.name = 'sqlDatabaseChain'
        this.version = 4.0
        this.version = 5.0
        this.type = 'SqlDatabaseChain'
        this.icon = 'sqlchain.svg'
        this.category = 'Chains'

@@ -115,6 +117,14 @@ class SqlDatabaseChain_Chains implements INode {
                placeholder: DEFAULT_SQL_DATABASE_PROMPT.template + DEFAULT_SQL_DATABASE_PROMPT.templateFormat,
                additionalParams: true,
                optional: true
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            }
        ]
    }

@@ -144,7 +154,7 @@ class SqlDatabaseChain_Chains implements INode {
        return chain
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
        const databaseType = nodeData.inputs?.database as DatabaseType
        const model = nodeData.inputs?.model as BaseLanguageModel
        const url = nodeData.inputs?.url as string

@@ -155,6 +165,17 @@ class SqlDatabaseChain_Chains implements INode {
        const sampleRowsInTableInfo = nodeData.inputs?.sampleRowsInTableInfo as number
        const topK = nodeData.inputs?.topK as number
        const customPrompt = nodeData.inputs?.customPrompt as string
        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the Sql Database Chain
                input = await checkInputs(moderations, input)
            } catch (e) {
                await new Promise((resolve) => setTimeout(resolve, 500))
                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }

        const chain = await getSQLDBChain(
            databaseType,
@@ -4,6 +4,8 @@ import { VectaraStore } from '@langchain/community/vectorstores/vectara'
import { VectorDBQAChain } from 'langchain/chains'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

// functionality based on https://github.com/vectara/vectara-answer
const reorderCitations = (unorderedSummary: string) => {

@@ -48,7 +50,7 @@ class VectaraChain_Chains implements INode {
    constructor() {
        this.label = 'Vectara QA Chain'
        this.name = 'vectaraQAChain'
        this.version = 1.0
        this.version = 2.0
        this.type = 'VectaraQAChain'
        this.icon = 'vectara.png'
        this.category = 'Chains'

@@ -219,6 +221,14 @@ class VectaraChain_Chains implements INode {
                description: 'Maximum results used to build the summarized response',
                type: 'number',
                default: 7
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            }
        ]
    }

@@ -227,7 +237,7 @@ class VectaraChain_Chains implements INode {
        return null
    }

    async run(nodeData: INodeData, input: string): Promise<object> {
    async run(nodeData: INodeData, input: string): Promise<string | object> {
        const vectorStore = nodeData.inputs?.vectaraStore as VectaraStore
        const responseLang = (nodeData.inputs?.responseLang as string) ?? 'eng'
        const summarizerPromptName = nodeData.inputs?.summarizerPromptName as string

@@ -252,6 +262,18 @@ class VectaraChain_Chains implements INode {
        const mmrRerankerId = 272725718
        const mmrEnabled = vectaraFilter?.mmrConfig?.enabled

        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the Vectara chain
                input = await checkInputs(moderations, input)
            } catch (e) {
                await new Promise((resolve) => setTimeout(resolve, 500))
                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }

        const data = {
            query: [
                {
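reorderCitations is only referenced here, so as a hedged illustration of what a vectara-answer-style helper typically does (an assumption about its behavior, not the committed implementation): it renumbers [N] citation markers so they run in order of first appearance in the summary.

    // Renumber citations like [7][2][7] -> [1][2][1], keeping first-seen order.
    const reorderCitationsSketch = (unorderedSummary: string): string => {
        const seen = new Map<string, number>()
        return unorderedSummary.replace(/\[(\d+)\]/g, (_m, n: string) => {
            if (!seen.has(n)) seen.set(n, seen.size + 1)
            return `[${seen.get(n)}]`
        })
    }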
@@ -4,6 +4,8 @@ import { VectorDBQAChain } from 'langchain/chains'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

class VectorDBQAChain_Chains implements INode {
    label: string

@@ -19,7 +21,7 @@ class VectorDBQAChain_Chains implements INode {
    constructor() {
        this.label = 'VectorDB QA Chain'
        this.name = 'vectorDBQAChain'
        this.version = 1.0
        this.version = 2.0
        this.type = 'VectorDBQAChain'
        this.icon = 'vectordb.svg'
        this.category = 'Chains'

@@ -35,6 +37,14 @@ class VectorDBQAChain_Chains implements INode {
                label: 'Vector Store',
                name: 'vectorStore',
                type: 'VectorStore'
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            }
        ]
    }

@@ -50,8 +60,20 @@ class VectorDBQAChain_Chains implements INode {
        return chain
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
        const chain = nodeData.instance as VectorDBQAChain
        const moderations = nodeData.inputs?.inputModeration as Moderation[]

        if (moderations && moderations.length > 0) {
            try {
                // Use the output of the moderation chain as input for the VectorDB QA Chain
                input = await checkInputs(moderations, input)
            } catch (e) {
                await new Promise((resolve) => setTimeout(resolve, 500))
                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
                return formatResponse(e.message)
            }
        }
        const obj = {
            query: input
        }
@@ -1,7 +1,7 @@
import { BedrockChat } from '@langchain/community/chat_models/bedrock'
import { BaseCache } from '@langchain/core/caches'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { BaseBedrockInput } from 'langchain/dist/util/bedrock'
import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

@@ -95,6 +95,8 @@ class AWSChatBedrock_ChatModels implements INode {
            name: 'model',
            type: 'options',
            options: [
                { label: 'anthropic.claude-3-sonnet', name: 'anthropic.claude-3-sonnet-20240229-v1:0' },
                { label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
                { label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
                { label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' },
@@ -1,8 +1,9 @@
import { AnthropicInput, ChatAnthropic } from '@langchain/anthropic'
import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatAnthropic } from './FlowiseChatAntrhopic'

class ChatAnthropic_ChatModels implements INode {
    label: string

@@ -19,12 +20,12 @@ class ChatAnthropic_ChatModels implements INode {
    constructor() {
        this.label = 'ChatAnthropic'
        this.name = 'chatAnthropic'
        this.version = 3.0
        this.version = 4.0
        this.type = 'ChatAnthropic'
        this.icon = 'Anthropic.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around ChatAnthropic large language models that use the Chat endpoint'
        this.baseClasses = [this.type, ...getBaseClasses(ChatAnthropic)]
        this.baseClasses = [this.type, ...getBaseClasses(LangchainChatAnthropic)]
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',

@@ -43,6 +44,16 @@ class ChatAnthropic_ChatModels implements INode {
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'claude-3-opus',
                        name: 'claude-3-opus-20240229',
                        description: 'Most powerful model for highly complex tasks'
                    },
                    {
                        label: 'claude-3-sonnet',
                        name: 'claude-3-sonnet-20240229',
                        description: 'Ideal balance of intelligence and speed for enterprise workloads'
                    },
                    {
                        label: 'claude-2',
                        name: 'claude-2',

@@ -137,6 +148,15 @@ class ChatAnthropic_ChatModels implements INode {
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Allow Image Uploads',
                name: 'allowImageUploads',
                type: 'boolean',
                description:
                    'Automatically uses claude-3-* models when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
                default: false,
                optional: true
            }
        ]
    }

@@ -153,6 +173,8 @@ class ChatAnthropic_ChatModels implements INode {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData)

        const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean

        const obj: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string } = {
            temperature: parseFloat(temperature),
            modelName,

@@ -165,7 +187,14 @@ class ChatAnthropic_ChatModels implements INode {
        if (topK) obj.topK = parseFloat(topK)
        if (cache) obj.cache = cache

        const model = new ChatAnthropic(obj)
        const multiModalOption: IMultiModalOption = {
            image: {
                allowImageUploads: allowImageUploads ?? false
            }
        }

        const model = new ChatAnthropic(nodeData.id, obj)
        model.setMultiModalOption(multiModalOption)
        return model
    }
}
@@ -37,6 +37,16 @@ class ChatAnthropic_LlamaIndex_ChatModels implements INode {
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'claude-3-opus',
                        name: 'claude-3-opus-20240229',
                        description: 'Most powerful model for highly complex tasks'
                    },
                    {
                        label: 'claude-3-sonnet',
                        name: 'claude-3-sonnet-20240229',
                        description: 'Ideal balance of intelligence and speed for enterprise workloads'
                    },
                    {
                        label: 'claude-2',
                        name: 'claude-2',
@@ -0,0 +1,33 @@
import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
import { IVisionChatModal, IMultiModalOption } from '../../../src'
import { BaseLLMParams } from '@langchain/core/language_models/llms'

export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChatModal {
    configuredModel: string
    configuredMaxToken: number
    multiModalOption: IMultiModalOption
    id: string

    constructor(id: string, fields: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string }) {
        super(fields)
        this.id = id
        this.configuredModel = fields?.modelName || 'claude-3-opus-20240229'
        this.configuredMaxToken = fields?.maxTokens ?? 256
    }

    revertToOriginalModel(): void {
        super.modelName = this.configuredModel
        super.maxTokens = this.configuredMaxToken
    }

    setMultiModalOption(multiModalOption: IMultiModalOption): void {
        this.multiModalOption = multiModalOption
    }

    setVisionModel(): void {
        if (!this.modelName.startsWith('claude-3')) {
            super.modelName = 'claude-3-opus-20240229'
            super.maxTokens = 1024
        }
    }
}
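A hedged usage sketch for the new wrapper above (the node id and API key are placeholders, not values from the commit):

    // Construct the Flowise wrapper, flag it as image-capable, then toggle models.
    const model = new ChatAnthropic('chatAnthropic_0', { modelName: 'claude-2', anthropicApiKey: 'sk-ant-...' })
    model.setMultiModalOption({ image: { allowImageUploads: true } })
    model.setVisionModel() // claude-2 is not a claude-3-* model, so this switches to claude-3-opus-20240229 with 1024 max tokens
    model.revertToOriginalModel() // restores claude-2 and the configured max tokens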
@@ -228,7 +228,7 @@ class ChatOpenAI_ChatModels implements INode {

        const obj: Partial<OpenAIChatInput> &
            Partial<AzureOpenAIInput> &
            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput; multiModalOption?: IMultiModalOption } = {
            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = {
            temperature: parseFloat(temperature),
            modelName,
            openAIApiKey,

@@ -265,10 +265,9 @@ class ChatOpenAI_ChatModels implements INode {
                imageResolution
            }
        }
        obj.multiModalOption = multiModalOption

        const model = new ChatOpenAI(nodeData.id, obj)

        model.setMultiModalOption(multiModalOption)
        return model
    }
}
@@ -1,38 +1,39 @@
import type { ClientOptions } from 'openai'
import {
    ChatOpenAI as LangchainChatOpenAI,
    OpenAIChatInput,
    LegacyOpenAIInput,
    AzureOpenAIInput,
    ChatOpenAICallOptions
} from '@langchain/openai'
import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, LegacyOpenAIInput, AzureOpenAIInput } from '@langchain/openai'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { IMultiModalOption } from '../../../src'
import { BaseMessageLike, LLMResult } from 'langchain/schema'
import { Callbacks } from '@langchain/core/callbacks/manager'
import { IMultiModalOption, IVisionChatModal } from '../../../src'

export class ChatOpenAI extends LangchainChatOpenAI {
export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal {
    configuredModel: string
    configuredMaxToken?: number
    multiModalOption?: IMultiModalOption
    configuredMaxToken: number
    multiModalOption: IMultiModalOption
    id: string

    constructor(
        id: string,
        fields?: Partial<OpenAIChatInput> &
            Partial<AzureOpenAIInput> &
            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput; multiModalOption?: IMultiModalOption },
            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput },
        /** @deprecated */
        configuration?: ClientOptions & LegacyOpenAIInput
    ) {
        super(fields, configuration)
        this.id = id
        this.multiModalOption = fields?.multiModalOption
        this.configuredModel = fields?.modelName ?? 'gpt-3.5-turbo'
        this.configuredMaxToken = fields?.maxTokens
        this.configuredMaxToken = fields?.maxTokens ?? 256
    }

    async generate(messages: BaseMessageLike[][], options?: string[] | ChatOpenAICallOptions, callbacks?: Callbacks): Promise<LLMResult> {
        return super.generate(messages, options, callbacks)
    revertToOriginalModel(): void {
        super.modelName = this.configuredModel
        super.maxTokens = this.configuredMaxToken
    }

    setMultiModalOption(multiModalOption: IMultiModalOption): void {
        this.multiModalOption = multiModalOption
    }

    setVisionModel(): void {
        super.modelName = 'gpt-4-vision-preview'
        super.maxTokens = 1024
    }
}
@@ -1,9 +1,9 @@
import { Bedrock } from '@langchain/community/llms/bedrock'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { BaseBedrockInput } from 'langchain/dist/util/bedrock'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock'

/**
 * I had to run the following to build the component
@@ -1,8 +1,8 @@
import { FlowiseSummaryMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
import { ConversationSummaryMemory, ConversationSummaryMemoryInput } from 'langchain/memory'
import { BaseLanguageModel } from 'langchain/base_language'
import { BaseLanguageModel } from '@langchain/core/language_models/base'
import { BaseMessage } from '@langchain/core/messages'
import { ConversationSummaryMemory, ConversationSummaryMemoryInput } from 'langchain/memory'

class ConversationSummaryMemory_Memory implements INode {
    label: string
@@ -90,12 +90,17 @@ class CustomFunction_Utilities implements INode {

        // Some values might be a stringified JSON, parse it
        for (const key in inputVars) {
            if (typeof inputVars[key] === 'string' && inputVars[key].startsWith('{') && inputVars[key].endsWith('}')) {
                try {
                    inputVars[key] = JSON.parse(inputVars[key])
                } catch (e) {
                    continue
            let value = inputVars[key]
            if (typeof value === 'string') {
                value = handleEscapeCharacters(value, true)
                if (value.startsWith('{') && value.endsWith('}')) {
                    try {
                        value = JSON.parse(value)
                    } catch (e) {
                        // ignore
                    }
                }
                inputVars[key] = value
            }
        }

@@ -105,11 +110,7 @@ class CustomFunction_Utilities implements INode {

        if (Object.keys(inputVars).length) {
            for (const item in inputVars) {
                let value = inputVars[item]
                if (typeof value === 'string') {
                    value = handleEscapeCharacters(value, true)
                }
                sandbox[`$${item}`] = value
                sandbox[`$${item}`] = inputVars[item]
            }
        }
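A self-contained sketch of the input normalization both utility nodes now share (the inline unescape stands in for Flowise's handleEscapeCharacters(value, true), whose exact behavior is assumed here):

    // Unescape string inputs first, then opportunistically parse JSON-looking values.
    const unescape = (s: string) => s.replace(/\\n/g, '\n').replace(/\\"/g, '"') // stand-in, assumed behavior
    const normalizeInputVars = (inputVars: Record<string, any>): Record<string, any> => {
        for (const key in inputVars) {
            let value = inputVars[key]
            if (typeof value === 'string') {
                value = unescape(value)
                if (value.startsWith('{') && value.endsWith('}')) {
                    try {
                        value = JSON.parse(value)
                    } catch {
                        // not valid JSON: keep the unescaped string
                    }
                }
                inputVars[key] = value
            }
        }
        return inputVars
    }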
@@ -101,12 +101,17 @@ class IfElseFunction_Utilities implements INode {

        // Some values might be a stringified JSON, parse it
        for (const key in inputVars) {
            if (typeof inputVars[key] === 'string' && inputVars[key].startsWith('{') && inputVars[key].endsWith('}')) {
                try {
                    inputVars[key] = JSON.parse(inputVars[key])
                } catch (e) {
                    continue
            let value = inputVars[key]
            if (typeof value === 'string') {
                value = handleEscapeCharacters(value, true)
                if (value.startsWith('{') && value.endsWith('}')) {
                    try {
                        value = JSON.parse(value)
                    } catch (e) {
                        // ignore
                    }
                }
                inputVars[key] = value
            }
        }

@@ -116,11 +121,7 @@ class IfElseFunction_Utilities implements INode {

        if (Object.keys(inputVars).length) {
            for (const item in inputVars) {
                let value = inputVars[item]
                if (typeof value === 'string') {
                    value = handleEscapeCharacters(value, true)
                }
                sandbox[`$${item}`] = value
                sandbox[`$${item}`] = inputVars[item]
            }
        }

@@ -142,10 +143,11 @@ class IfElseFunction_Utilities implements INode {
        const vm = new NodeVM(nodeVMOptions)
        try {
            const responseTrue = await vm.run(`module.exports = async function() {${ifFunction}}()`, __dirname)
            if (responseTrue) return { output: responseTrue, type: true }
            if (responseTrue)
                return { output: typeof responseTrue === 'string' ? handleEscapeCharacters(responseTrue, false) : responseTrue, type: true }

            const responseFalse = await vm.run(`module.exports = async function() {${elseFunction}}()`, __dirname)
            return { output: responseFalse, type: false }
            return { output: typeof responseFalse === 'string' ? handleEscapeCharacters(responseFalse, false) : responseFalse, type: false }
        } catch (e) {
            throw new Error(e)
        }
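Note the escape handling is now symmetric: values are unescaped on the way into the sandbox and string outputs are re-escaped on the way out. A hedged sketch of that contract, with the boolean direction inferred from the usage above:

    // handleEscapeCharacters(v, true)  -> unescape before the sandboxed function runs
    // handleEscapeCharacters(v, false) -> re-escape the result before it re-enters the flow
    const passThroughSandbox = async (raw: string, run: (v: string) => Promise<string>) => {
        const unescaped = handleEscapeCharacters(raw, true)
        const result = await run(unescaped)
        return handleEscapeCharacters(result, false)
    }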
@@ -29,7 +29,7 @@
        "@google-ai/generativelanguage": "^0.2.1",
        "@google/generative-ai": "^0.1.3",
        "@huggingface/inference": "^2.6.1",
        "@langchain/anthropic": "^0.0.10",
        "@langchain/anthropic": "^0.1.4",
        "@langchain/cohere": "^0.0.5",
        "@langchain/community": "^0.0.30",
        "@langchain/google-genai": "^0.0.10",

@@ -65,15 +65,15 @@
        "ioredis": "^5.3.2",
        "jsonpointer": "^5.0.1",
        "langchain": "^0.1.20",
        "langfuse": "3.1.0",
        "langfuse-langchain": "^3.1.0",
        "langfuse": "3.3.1",
        "langfuse-langchain": "^3.3.1",
        "langsmith": "0.1.6",
        "linkifyjs": "^4.1.1",
        "llamaindex": "^0.0.48",
        "lunary": "^0.6.16",
        "mammoth": "^1.5.1",
        "moment": "^2.29.3",
        "mongodb": "^6.2.0",
        "mongodb": "6.2.0",
        "mysql2": "^3.5.1",
        "node-fetch": "^2.6.11",
        "node-html-markdown": "^1.3.0",
@@ -270,3 +270,14 @@ export abstract class FlowiseSummaryMemory extends ConversationSummaryMemory imp
    abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void>
    abstract clearChatMessages(overrideSessionId?: string): Promise<void>
}

export interface IVisionChatModal {
    id: string
    configuredModel: string
    configuredMaxToken: number
    multiModalOption: IMultiModalOption

    setVisionModel(): void
    revertToOriginalModel(): void
    setMultiModalOption(multiModalOption: IMultiModalOption): void
}
@@ -257,6 +257,8 @@ export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {

    input?: string

    isXML?: boolean

    /**
     * How to handle errors raised by the agent's output parser.
      Defaults to `False`, which raises the error.

@@ -277,7 +279,7 @@ export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
        return this.agent.returnValues
    }

    constructor(input: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string }) {
    constructor(input: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string; isXML?: boolean }) {
        let agent: BaseSingleActionAgent | BaseMultiActionAgent
        if (Runnable.isRunnable(input.agent)) {
            agent = new RunnableAgent({ runnable: input.agent })

@@ -305,13 +307,17 @@ export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
        this.sessionId = input.sessionId
        this.chatId = input.chatId
        this.input = input.input
        this.isXML = input.isXML
    }

    static fromAgentAndTools(fields: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string }): AgentExecutor {
    static fromAgentAndTools(
        fields: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string; isXML?: boolean }
    ): AgentExecutor {
        const newInstance = new AgentExecutor(fields)
        if (fields.sessionId) newInstance.sessionId = fields.sessionId
        if (fields.chatId) newInstance.chatId = fields.chatId
        if (fields.input) newInstance.input = fields.input
        if (fields.isXML) newInstance.isXML = fields.isXML
        return newInstance
    }

@@ -405,12 +411,16 @@ export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
     * - flowConfig?: { sessionId?: string, chatId?: string, input?: string }
     */
    observation = tool
        ? // @ts-ignore
          await tool.call(action.toolInput, runManager?.getChild(), undefined, {
              sessionId: this.sessionId,
              chatId: this.chatId,
              input: this.input
          })
        ? await (tool as any).call(
              this.isXML && typeof action.toolInput === 'string' ? { input: action.toolInput } : action.toolInput,
              runManager?.getChild(),
              undefined,
              {
                  sessionId: this.sessionId,
                  chatId: this.chatId,
                  input: this.input
              }
          )
        : `${action.tool} is not a valid tool, try another one.`
    } catch (e) {
        if (e instanceof ToolInputParsingException) {

@@ -526,12 +536,16 @@ export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
     * - tags?: string[]
     * - flowConfig?: { sessionId?: string, chatId?: string, input?: string }
     */
    // @ts-ignore
    observation = await tool.call(agentAction.toolInput, runManager?.getChild(), undefined, {
        sessionId: this.sessionId,
        chatId: this.chatId,
        input: this.input
    })
    observation = await (tool as any).call(
        this.isXML && typeof agentAction.toolInput === 'string' ? { input: agentAction.toolInput } : agentAction.toolInput,
        runManager?.getChild(),
        undefined,
        {
            sessionId: this.sessionId,
            chatId: this.chatId,
            input: this.input
        }
    )
    if (observation?.includes(SOURCE_DOCUMENTS_PREFIX)) {
        const observationArray = observation.split(SOURCE_DOCUMENTS_PREFIX)
        observation = observationArray[0]
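The isXML flag exists because XML-style agents emit their tool input as a raw string, while structured tools expect an object. A minimal sketch of the wrapping decision in isolation (the example values are illustrative):

    // XML agents produce plain-string tool input; structured tools want { input: ... }.
    const normalizeToolInput = (isXML: boolean | undefined, toolInput: string | Record<string, unknown>) =>
        isXML && typeof toolInput === 'string' ? { input: toolInput } : toolInput

    normalizeToolInput(true, 'weather in SF') // -> { input: 'weather in SF' }
    normalizeToolInput(false, { query: 'weather' }) // -> { query: 'weather' }, unchanged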
@@ -261,7 +261,8 @@ export const additionalCallbacks = async (nodeData: INodeData, options: ICommonO
    let langFuseOptions: any = {
        secretKey: langFuseSecretKey,
        publicKey: langFusePublicKey,
        baseUrl: langFuseEndpoint ?? 'https://cloud.langfuse.com'
        baseUrl: langFuseEndpoint ?? 'https://cloud.langfuse.com',
        sdkIntegration: 'Flowise'
    }
    if (release) langFuseOptions.release = release
    if (options.chatId) langFuseOptions.sessionId = options.chatId

@@ -340,6 +341,7 @@ export class AnalyticHandler {
        secretKey: langFuseSecretKey,
        publicKey: langFusePublicKey,
        baseUrl: langFuseEndpoint ?? 'https://cloud.langfuse.com',
        sdkIntegration: 'Flowise',
        release
    })
    this.handlers['langFuse'] = { client: langfuse }
@@ -1,5 +1,4 @@
import { ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface'
import { ChatOpenAI as LangchainChatOpenAI } from 'langchain/chat_models/openai'
import { IVisionChatModal, ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface'
import path from 'path'
import { getStoragePath } from './utils'
import fs from 'fs'

@@ -12,7 +11,7 @@ export const addImagesToMessages = (
    const imageContent: MessageContentImageUrl[] = []
    let model = nodeData.inputs?.model

    if (model instanceof LangchainChatOpenAI && multiModalOption) {
    if (llmSupportsVision(model) && multiModalOption) {
        // Image Uploaded
        if (multiModalOption.image && multiModalOption.image.allowImageUploads && options?.uploads && options?.uploads.length > 0) {
            const imageUploads = getImageUploads(options.uploads)

@@ -46,3 +45,5 @@ export const getAudioUploads = (uploads: IFileUpload[]) => {
export const getImageUploads = (uploads: IFileUpload[]) => {
    return uploads.filter((upload: IFileUpload) => upload.mime.startsWith('image/'))
}

export const llmSupportsVision = (value: any): value is IVisionChatModal => !!value?.multiModalOption
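llmSupportsVision is a structural type guard: any model that carries a multiModalOption is treated as vision-capable, which is what lets the ChatOpenAI instanceof check above be deleted. A small sketch of the narrowing it provides (maybeModel stands for whatever the node wires in):

    const maybeModel: unknown = undefined // placeholder for nodeData.inputs?.model
    if (llmSupportsVision(maybeModel)) {
        // inside this branch, TypeScript knows maybeModel is IVisionChatModal
        maybeModel.setVisionModel()
    }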
@@ -15,7 +15,7 @@
    "data": {
        "id": "openApiChain_1",
        "label": "OpenAPI Chain",
        "version": 1,
        "version": 2,
        "name": "openApiChain",
        "type": "OpenAPIChain",
        "baseClasses": ["OpenAPIChain", "BaseChain"],

@@ -53,9 +53,19 @@
                "name": "model",
                "type": "ChatOpenAI",
                "id": "openApiChain_1-input-model-ChatOpenAI"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "openApiChain_1-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "model": "{{chatOpenAI_1.data.instance}}",
            "yamlLink": "https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml",
            "headers": ""

@@ -399,7 +409,7 @@
        "id": "openAIFunctionAgent_0",
        "label": "OpenAI Function Agent",
        "name": "openAIFunctionAgent",
        "version": 3,
        "version": 4,
        "type": "AgentExecutor",
        "baseClasses": ["AgentExecutor", "BaseChain"],
        "category": "Agents",

@@ -434,9 +444,19 @@
                "name": "model",
                "type": "BaseChatModel",
                "id": "openAIFunctionAgent_0-input-model-BaseChatModel"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "openAIFunctionAgent_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "tools": ["{{chainTool_0.data.instance}}"],
            "memory": "{{bufferMemory_0.data.instance}}",
            "model": "{{chatOpenAI_2.data.instance}}",

@@ -1100,7 +1100,7 @@
    "data": {
        "id": "conversationalAgent_0",
        "label": "Conversational Agent",
        "version": 2,
        "version": 3,
        "name": "conversationalAgent",
        "type": "AgentExecutor",
        "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],

@@ -1137,9 +1137,19 @@
                "name": "memory",
                "type": "BaseChatMemory",
                "id": "conversationalAgent_0-input-memory-BaseChatMemory"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "conversationalAgent_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "tools": ["{{chainTool_0.data.instance}}", "{{chainTool_1.data.instance}}"],
            "model": "{{chatOpenAI_3.data.instance}}",
            "memory": "{{bufferMemory_0.data.instance}}",

@@ -15,7 +15,7 @@
    "data": {
        "id": "autoGPT_0",
        "label": "AutoGPT",
        "version": 1,
        "version": 2,
        "name": "autoGPT",
        "type": "AutoGPT",
        "baseClasses": ["AutoGPT"],

@@ -66,9 +66,19 @@
                "name": "vectorStoreRetriever",
                "type": "BaseRetriever",
                "id": "autoGPT_0-input-vectorStoreRetriever-BaseRetriever"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "autoGPT_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "tools": ["{{readFile_0.data.instance}}", "{{writeFile_1.data.instance}}", "{{serpAPI_0.data.instance}}"],
            "model": "{{chatOpenAI_0.data.instance}}",
            "vectorStoreRetriever": "{{pinecone_0.data.instance}}",

@@ -15,7 +15,7 @@
    "data": {
        "id": "babyAGI_1",
        "label": "BabyAGI",
        "version": 1,
        "version": 2,
        "name": "babyAGI",
        "type": "BabyAGI",
        "baseClasses": ["BabyAGI"],

@@ -42,9 +42,19 @@
                "name": "vectorStore",
                "type": "VectorStore",
                "id": "babyAGI_1-input-vectorStore-VectorStore"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "babyAGI_1-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "model": "{{chatOpenAI_0.data.instance}}",
            "vectorStore": "{{pinecone_0.data.instance}}",
            "taskLoop": 3

@@ -16,7 +16,7 @@
        "id": "csvAgent_0",
        "label": "CSV Agent",
        "name": "csvAgent",
        "version": 1,
        "version": 2,
        "type": "AgentExecutor",
        "baseClasses": ["AgentExecutor", "BaseChain"],
        "category": "Agents",

@@ -36,9 +36,19 @@
                "name": "model",
                "type": "BaseLanguageModel",
                "id": "csvAgent_0-input-model-BaseLanguageModel"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "csvAgent_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "model": "{{chatOpenAI_0.data.instance}}"
        },
        "outputAnchors": [

@@ -15,7 +15,7 @@
    "data": {
        "id": "conversationalRetrievalQAChain_0",
        "label": "Conversational Retrieval QA Chain",
        "version": 2,
        "version": 3,
        "name": "conversationalRetrievalQAChain",
        "type": "ConversationalRetrievalQAChain",
        "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],

@@ -74,9 +74,19 @@
                "optional": true,
                "description": "If left empty, a default BufferMemory will be used",
                "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "model": "{{chatOpenAI_0.data.instance}}",
            "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}",
            "memory": "",
@@ -451,7 +451,7 @@
        "id": "mrklAgentChat_0",
        "label": "MRKL Agent for Chat Models",
        "name": "mrklAgentChat",
        "version": 1,
        "version": 2,
        "type": "AgentExecutor",
        "baseClasses": ["AgentExecutor", "BaseChain"],
        "category": "Agents",

@@ -470,9 +470,19 @@
                "name": "model",
                "type": "BaseLanguageModel",
                "id": "mrklAgentChat_0-input-model-BaseLanguageModel"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "mrklAgentChat_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "tools": ["{{requestsGet_0.data.instance}}", "{{requestsPost_0.data.instance}}", "{{aiPlugin_0.data.instance}}"],
            "model": "{{chatOpenAI_0.data.instance}}"
        },

@@ -160,7 +160,7 @@
    "data": {
        "id": "chatAnthropic_0",
        "label": "ChatAnthropic",
        "version": 3,
        "version": 4,
        "name": "chatAnthropic",
        "type": "ChatAnthropic",
        "baseClasses": ["ChatAnthropic", "BaseChatModel", "BaseLanguageModel", "Runnable"],

@@ -179,6 +179,16 @@
        "name": "modelName",
        "type": "options",
        "options": [
            {
                "label": "claude-3-opus",
                "name": "claude-3-opus-20240229",
                "description": "Most powerful model for highly complex tasks"
            },
            {
                "label": "claude-3-sonnet",
                "name": "claude-3-sonnet-20240229",
                "description": "Ideal balance of intelligence and speed for enterprise workloads"
            },
            {
                "label": "claude-2",
                "name": "claude-2",

@@ -278,6 +288,15 @@
                "optional": true,
                "additionalParams": true,
                "id": "chatAnthropic_0-input-topK-number"
            },
            {
                "label": "Allow Image Uploads",
                "name": "allowImageUploads",
                "type": "boolean",
                "description": "Automatically uses claude-3-* models when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
                "default": false,
                "optional": true,
                "id": "chatAnthropic_0-input-allowImageUploads-boolean"
            }
        ],
        "inputAnchors": [

@@ -295,7 +314,8 @@
            "temperature": 0.9,
            "maxTokensToSample": "",
            "topP": "",
            "topK": ""
            "topK": "",
            "allowImageUploads": true
        },
        "outputAnchors": [
            {
@@ -392,7 +392,7 @@
    "data": {
        "id": "conversationalAgent_0",
        "label": "Conversational Agent",
        "version": 2,
        "version": 3,
        "name": "conversationalAgent",
        "type": "AgentExecutor",
        "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],

@@ -429,9 +429,19 @@
                "name": "memory",
                "type": "BaseChatMemory",
                "id": "conversationalAgent_0-input-memory-BaseChatMemory"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "conversationalAgent_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "tools": ["{{calculator_1.data.instance}}", "{{serpAPI_0.data.instance}}"],
            "model": "{{chatOpenAI_0.data.instance}}",
            "memory": "{{bufferMemory_1.data.instance}}",

@@ -123,7 +123,7 @@
    "data": {
        "id": "conversationalRetrievalAgent_0",
        "label": "Conversational Retrieval Agent",
        "version": 3,
        "version": 4,
        "name": "conversationalRetrievalAgent",
        "type": "AgentExecutor",
        "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],

@@ -159,9 +159,19 @@
                "name": "model",
                "type": "BaseChatModel",
                "id": "conversationalRetrievalAgent_0-input-model-BaseChatModel"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "conversationalRetrievalAgent_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "tools": ["{{retrieverTool_0.data.instance}}"],
            "memory": "{{bufferMemory_0.data.instance}}",
            "model": "{{chatOpenAI_0.data.instance}}",

@@ -274,7 +274,7 @@
    "data": {
        "id": "conversationalRetrievalQAChain_0",
        "label": "Conversational Retrieval QA Chain",
        "version": 2,
        "version": 3,
        "name": "conversationalRetrievalQAChain",
        "type": "ConversationalRetrievalQAChain",
        "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],

@@ -333,9 +333,19 @@
                "optional": true,
                "description": "If left empty, a default BufferMemory will be used",
                "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "model": "{{chatOpenAI_0.data.instance}}",
            "vectorStoreRetriever": "{{pinecone_0.data.instance}}",
            "memory": "",

@@ -158,7 +158,7 @@
        "id": "conversationalRetrievalQAChain_0",
        "label": "Conversational Retrieval QA Chain",
        "name": "conversationalRetrievalQAChain",
        "version": 2,
        "version": 3,
        "type": "ConversationalRetrievalQAChain",
        "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
        "category": "Chains",

@@ -216,9 +216,19 @@
                "optional": true,
                "description": "If left empty, a default BufferMemory will be used",
                "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "model": "{{chatOpenAI_0.data.instance}}",
            "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}",
            "memory": "",

@@ -85,7 +85,7 @@
    "data": {
        "id": "conversationalRetrievalQAChain_0",
        "label": "Conversational Retrieval QA Chain",
        "version": 2,
        "version": 3,
        "name": "conversationalRetrievalQAChain",
        "type": "ConversationalRetrievalQAChain",
        "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],

@@ -144,9 +144,19 @@
                "optional": true,
                "description": "If left empty, a default BufferMemory will be used",
                "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "model": "{{chatOllama_0.data.instance}}",
            "vectorStoreRetriever": "{{faiss_0.data.instance}}",
            "memory": "",

@@ -15,7 +15,7 @@
    "data": {
        "id": "conversationalRetrievalQAChain_0",
        "label": "Conversational Retrieval QA Chain",
        "version": 2,
        "version": 3,
        "name": "conversationalRetrievalQAChain",
        "type": "ConversationalRetrievalQAChain",
        "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],

@@ -74,9 +74,19 @@
                "optional": true,
                "description": "If left empty, a default BufferMemory will be used",
                "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "model": "{{chatOpenAI_0.data.instance}}",
            "vectorStoreRetriever": "{{qdrant_0.data.instance}}",
            "memory": "{{ZepMemory_0.data.instance}}",

@@ -251,7 +251,7 @@
    "data": {
        "id": "conversationalRetrievalQAChain_0",
        "label": "Conversational Retrieval QA Chain",
        "version": 2,
        "version": 3,
        "name": "conversationalRetrievalQAChain",
        "type": "ConversationalRetrievalQAChain",
        "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],

@@ -310,9 +310,19 @@
                "optional": true,
                "description": "If left empty, a default BufferMemory will be used",
                "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
            },
            {
                "label": "Input Moderation",
                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                "name": "inputModeration",
                "type": "Moderation",
                "optional": true,
                "list": true,
                "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
            }
        ],
        "inputs": {
            "inputModeration": "",
            "model": "{{chatOpenAI_0.data.instance}}",
            "vectorStoreRetriever": "{{pinecone_0.data.instance}}",
            "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
@ -84,7 +84,7 @@
|
|||
"id": "multiPromptChain_0",
|
||||
"label": "Multi Prompt Chain",
|
||||
"name": "multiPromptChain",
|
||||
"version": 1,
|
||||
"version": 2,
|
||||
"type": "MultiPromptChain",
|
||||
"baseClasses": ["MultiPromptChain", "MultiRouteChain", "BaseChain", "BaseLangChain"],
|
||||
"category": "Chains",
|
||||
|
|
@ -103,9 +103,19 @@
|
|||
"type": "PromptRetriever",
|
||||
"list": true,
|
||||
"id": "multiPromptChain_0-input-promptRetriever-PromptRetriever"
|
||||
},
|
||||
{
|
||||
"label": "Input Moderation",
|
||||
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
|
||||
"name": "inputModeration",
|
||||
"type": "Moderation",
|
||||
"optional": true,
|
||||
"list": true,
|
||||
"id": "multiPromptChain_0-input-inputModeration-Moderation"
|
||||
}
|
||||
],
|
||||
"inputs": {
|
||||
"inputModeration": "",
|
||||
"model": "{{chatOpenAI_0.data.instance}}",
|
||||
"promptRetriever": [
|
||||
"{{promptRetriever_0.data.instance}}",
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@
|
|||
"data": {
|
||||
"id": "multiRetrievalQAChain_0",
|
||||
"label": "Multi Retrieval QA Chain",
|
||||
"version": 1,
|
||||
"version": 2,
|
||||
"name": "multiRetrievalQAChain",
|
||||
"type": "MultiRetrievalQAChain",
|
||||
"baseClasses": ["MultiRetrievalQAChain", "MultiRouteChain", "BaseChain", "BaseLangChain"],
|
||||
|
|
@ -109,9 +109,19 @@
|
|||
"type": "VectorStoreRetriever",
|
||||
"list": true,
|
||||
"id": "multiRetrievalQAChain_0-input-vectorStoreRetriever-VectorStoreRetriever"
|
||||
},
|
||||
{
|
||||
"label": "Input Moderation",
|
||||
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
|
||||
"name": "inputModeration",
|
||||
"type": "Moderation",
|
||||
"optional": true,
|
||||
"list": true,
|
||||
"id": "multiRetrievalQAChain_0-input-inputModeration-Moderation"
|
||||
}
|
||||
],
|
||||
"inputs": {
|
||||
"inputModeration": "",
|
||||
"model": "{{chatOpenAI_0.data.instance}}",
|
||||
"vectorStoreRetriever": [
|
||||
"{{vectorStoreRetriever_0.data.instance}}",
|
||||
|
|
|
|||
|
|
@@ -163,7 +163,7 @@
         "data": {
             "id": "retrievalQAChain_0",
             "label": "Retrieval QA Chain",
-            "version": 1,
+            "version": 2,
             "name": "retrievalQAChain",
             "type": "RetrievalQAChain",
             "baseClasses": ["RetrievalQAChain", "BaseChain", "BaseLangChain"],
@@ -182,9 +182,19 @@
                 "name": "vectorStoreRetriever",
                 "type": "BaseRetriever",
                 "id": "retrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever"
             },
+            {
+                "label": "Input Moderation",
+                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                "name": "inputModeration",
+                "type": "Moderation",
+                "optional": true,
+                "list": true,
+                "id": "retrievalQAChain_0-input-inputModeration-Moderation"
+            }
         ],
         "inputs": {
+            "inputModeration": "",
             "model": "{{chatOpenAI_0.data.instance}}",
             "vectorStoreRetriever": "{{redis_0.data.instance}}"
         },
@@ -218,7 +228,7 @@
         "data": {
             "id": "retrievalQAChain_1",
             "label": "Retrieval QA Chain",
-            "version": 1,
+            "version": 2,
             "name": "retrievalQAChain",
             "type": "RetrievalQAChain",
             "baseClasses": ["RetrievalQAChain", "BaseChain", "BaseLangChain"],
@@ -237,9 +247,19 @@
                 "name": "vectorStoreRetriever",
                 "type": "BaseRetriever",
                 "id": "retrievalQAChain_1-input-vectorStoreRetriever-BaseRetriever"
             },
+            {
+                "label": "Input Moderation",
+                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                "name": "inputModeration",
+                "type": "Moderation",
+                "optional": true,
+                "list": true,
+                "id": "retrievalQAChain_1-input-inputModeration-Moderation"
+            }
         ],
         "inputs": {
+            "inputModeration": "",
             "model": "{{chatOpenAI_1.data.instance}}",
             "vectorStoreRetriever": "{{faiss_0.data.instance}}"
         },

@@ -1741,7 +1761,7 @@
         "data": {
             "id": "conversationalAgent_0",
             "label": "Conversational Agent",
-            "version": 2,
+            "version": 3,
             "name": "conversationalAgent",
             "type": "AgentExecutor",
             "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
@@ -1778,9 +1798,19 @@
                 "name": "memory",
                 "type": "BaseChatMemory",
                 "id": "conversationalAgent_0-input-memory-BaseChatMemory"
             },
+            {
+                "label": "Input Moderation",
+                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                "name": "inputModeration",
+                "type": "Moderation",
+                "optional": true,
+                "list": true,
+                "id": "conversationalAgent_0-input-inputModeration-Moderation"
+            }
         ],
         "inputs": {
+            "inputModeration": "",
             "tools": ["{{chainTool_2.data.instance}}", "{{chainTool_3.data.instance}}"],
             "model": "{{chatOpenAI_2.data.instance}}",
             "memory": "{{bufferMemory_0.data.instance}}",

@@ -208,7 +208,7 @@
             "id": "openAIFunctionAgent_0",
             "label": "OpenAI Function Agent",
             "name": "openAIFunctionAgent",
-            "version": 3,
+            "version": 4,
             "type": "AgentExecutor",
             "baseClasses": ["AgentExecutor", "BaseChain"],
             "category": "Agents",
@@ -243,9 +243,19 @@
                 "name": "model",
                 "type": "BaseChatModel",
                 "id": "openAIFunctionAgent_0-input-model-BaseChatModel"
             },
+            {
+                "label": "Input Moderation",
+                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                "name": "inputModeration",
+                "type": "Moderation",
+                "optional": true,
+                "list": true,
+                "id": "openAIFunctionAgent_0-input-inputModeration-Moderation"
+            }
         ],
         "inputs": {
+            "inputModeration": "",
             "tools": ["{{calculator_0.data.instance}}", "{{serper_0.data.instance}}", "{{customTool_0.data.instance}}"],
             "memory": "{{bufferMemory_0.data.instance}}",
             "model": "{{chatOpenAI_0.data.instance}}",

@@ -382,6 +382,16 @@
             "name": "modelName",
             "type": "options",
             "options": [
+                {
+                    "label": "claude-3-opus",
+                    "name": "claude-3-opus-20240229",
+                    "description": "Most powerful model for highly complex tasks"
+                },
+                {
+                    "label": "claude-3-sonnet",
+                    "name": "claude-3-sonnet-20240229",
+                    "description": "Ideal balance of intelligence and speed for enterprise workloads"
+                },
                 {
                     "label": "claude-2",
                     "name": "claude-2",

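As a usage sketch (assuming the stock @langchain/anthropic client, which this hunk does not touch), one of the newly added option names would be passed straight through as the model name:

import { ChatAnthropic } from '@langchain/anthropic'

// Sketch only: 'claude-3-sonnet-20240229' is one of the option names added above.
const model = new ChatAnthropic({
    modelName: 'claude-3-sonnet-20240229',
    anthropicApiKey: process.env.ANTHROPIC_API_KEY, // assumed to be set
    temperature: 0
})
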
@@ -52,7 +52,7 @@
         "data": {
             "id": "mrklAgentChat_0",
             "label": "ReAct Agent for Chat Models",
-            "version": 3,
+            "version": 4,
             "name": "mrklAgentChat",
             "type": "AgentExecutor",
             "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
@@ -78,9 +78,19 @@
                 "name": "memory",
                 "type": "BaseChatMemory",
                 "id": "mrklAgentChat_0-input-memory-BaseChatMemory"
             },
+            {
+                "label": "Input Moderation",
+                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                "name": "inputModeration",
+                "type": "Moderation",
+                "optional": true,
+                "list": true,
+                "id": "mrklAgentChat_0-input-inputModeration-Moderation"
+            }
         ],
         "inputs": {
+            "inputModeration": "",
             "tools": ["{{calculator_1.data.instance}}", "{{serper_0.data.instance}}"],
             "model": "{{chatOpenAI_0.data.instance}}",
             "memory": "{{RedisBackedChatMemory_0.data.instance}}"

@@ -249,7 +249,7 @@
         "data": {
             "id": "sqlDatabaseChain_0",
             "label": "Sql Database Chain",
-            "version": 4,
+            "version": 5,
             "name": "sqlDatabaseChain",
             "type": "SqlDatabaseChain",
             "baseClasses": ["SqlDatabaseChain", "BaseChain", "Runnable"],
@@ -347,9 +347,19 @@
                 "name": "model",
                 "type": "BaseLanguageModel",
                 "id": "sqlDatabaseChain_0-input-model-BaseLanguageModel"
             },
+            {
+                "label": "Input Moderation",
+                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                "name": "inputModeration",
+                "type": "Moderation",
+                "optional": true,
+                "list": true,
+                "id": "sqlDatabaseChain_0-input-inputModeration-Moderation"
+            }
         ],
         "inputs": {
+            "inputModeration": "",
             "model": "{{chatOpenAI_0.data.instance}}",
             "database": "sqlite",
             "url": "",

@@ -15,7 +15,7 @@
         "data": {
             "id": "vectaraQAChain_0",
             "label": "Vectara QA Chain",
-            "version": 1,
+            "version": 2,
             "name": "vectaraQAChain",
             "type": "VectaraQAChain",
             "baseClasses": ["VectaraQAChain", "BaseChain", "Runnable"],
@@ -189,9 +189,19 @@
                 "name": "vectaraStore",
                 "type": "VectorStore",
                 "id": "vectaraQAChain_0-input-vectaraStore-VectorStore"
             },
+            {
+                "label": "Input Moderation",
+                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                "name": "inputModeration",
+                "type": "Moderation",
+                "optional": true,
+                "list": true,
+                "id": "vectaraQAChain_0-input-inputModeration-Moderation"
+            }
         ],
         "inputs": {
+            "inputModeration": "",
             "vectaraStore": "{{vectara_1.data.instance}}",
             "summarizerPromptName": "vectara-experimental-summary-ext-2023-10-23-small",
             "responseLang": "eng",

@@ -702,7 +702,7 @@
         "data": {
             "id": "conversationalAgent_0",
             "label": "Conversational Agent",
-            "version": 2,
+            "version": 3,
             "name": "conversationalAgent",
             "type": "AgentExecutor",
             "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
@@ -739,9 +739,19 @@
                 "name": "memory",
                 "type": "BaseChatMemory",
                 "id": "conversationalAgent_0-input-memory-BaseChatMemory"
             },
+            {
+                "label": "Input Moderation",
+                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                "name": "inputModeration",
+                "type": "Moderation",
+                "optional": true,
+                "list": true,
+                "id": "conversationalAgent_0-input-inputModeration-Moderation"
+            }
         ],
         "inputs": {
+            "inputModeration": "",
             "tools": ["{{webBrowser_0.data.instance}}"],
             "model": "{{chatOpenAI_1.data.instance}}",
             "memory": "{{bufferMemory_0.data.instance}}",

@@ -187,7 +187,7 @@
         "data": {
             "id": "conversationalRetrievalQAChain_0",
             "label": "Conversational Retrieval QA Chain",
-            "version": 2,
+            "version": 3,
             "name": "conversationalRetrievalQAChain",
             "type": "ConversationalRetrievalQAChain",
             "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
@@ -246,9 +246,19 @@
                 "optional": true,
                 "description": "If left empty, a default BufferMemory will be used",
                 "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
             },
+            {
+                "label": "Input Moderation",
+                "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                "name": "inputModeration",
+                "type": "Moderation",
+                "optional": true,
+                "list": true,
+                "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
+            }
         ],
         "inputs": {
+            "inputModeration": "",
             "model": "{{chatOpenAI_0.data.instance}}",
             "vectorStoreRetriever": "{{pinecone_0.data.instance}}",
             "memory": "{{RedisBackedChatMemory_0.data.instance}}",

@@ -1371,13 +1371,12 @@ export class App {
                 }
                 templates.push(template)
             })
-            const FlowiseDocsQnA = templates.find((tmp) => tmp.name === 'Flowise Docs QnA')
-            const FlowiseDocsQnAIndex = templates.findIndex((tmp) => tmp.name === 'Flowise Docs QnA')
-            if (FlowiseDocsQnA && FlowiseDocsQnAIndex > 0) {
-                templates.splice(FlowiseDocsQnAIndex, 1)
-                templates.unshift(FlowiseDocsQnA)
+            const sortedTemplates = templates.sort((a, b) => a.templateName.localeCompare(b.templateName))
+            const FlowiseDocsQnAIndex = sortedTemplates.findIndex((tmp) => tmp.templateName === 'Flowise Docs QnA')
+            if (FlowiseDocsQnAIndex > 0) {
+                sortedTemplates.unshift(sortedTemplates.splice(FlowiseDocsQnAIndex, 1)[0])
             }
-            return res.json(templates.sort((a, b) => a.templateName.localeCompare(b.templateName)))
+            return res.json(sortedTemplates)
         })

        // ----------------------------------------

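The old version looked templates up by tmp.name rather than templateName and then re-sorted at response time, which undid the unshift; the rewrite sorts first, then moves 'Flowise Docs QnA' to the front. A self-contained sketch of the new ordering, with invented template names:

// Self-contained sketch of the reworked ordering; template names are invented.
interface MarketplaceTemplate {
    templateName: string
}

const templates: MarketplaceTemplate[] = [{ templateName: 'SQL QnA' }, { templateName: 'Flowise Docs QnA' }, { templateName: 'API Agent' }]

// Sort alphabetically first, then pin 'Flowise Docs QnA' to the front if present.
const sortedTemplates = templates.sort((a, b) => a.templateName.localeCompare(b.templateName))
const docsQnAIndex = sortedTemplates.findIndex((tmp) => tmp.templateName === 'Flowise Docs QnA')
if (docsQnAIndex > 0) {
    // splice returns the removed element; unshift puts it at the front
    sortedTemplates.unshift(sortedTemplates.splice(docsQnAIndex, 1)[0])
}
// sortedTemplates order: 'Flowise Docs QnA', 'API Agent', 'SQL QnA'
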
@@ -1534,7 +1533,7 @@ export class App {
            if (!chatflow) return `Chatflow ${chatflowid} not found`

            const uploadAllowedNodes = ['llmChain', 'conversationChain', 'mrklAgentChat', 'conversationalAgent']
-            const uploadProcessingNodes = ['chatOpenAI']
+            const uploadProcessingNodes = ['chatOpenAI', 'chatAnthropic']

            const flowObj = JSON.parse(chatflow.flowData)
            const imgUploadSizeAndTypes: IUploadFileSizeAndTypes[] = []

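A hedged sketch of how the two lists plausibly interact (the actual gating code lives outside this hunk): uploads would only be offered when the flow contains both an allowed chain/agent node and a vision-capable chat model node, which is why adding 'chatAnthropic' enables image uploads for Claude flows.

// Assumed gating logic, sketched around the two lists from the hunk above;
// the real check is not shown in this diff.
const uploadAllowedNodes = ['llmChain', 'conversationChain', 'mrklAgentChat', 'conversationalAgent']
const uploadProcessingNodes = ['chatOpenAI', 'chatAnthropic']

const allowsImageUploads = (flowNodeNames: string[]): boolean =>
    flowNodeNames.some((name) => uploadAllowedNodes.includes(name)) &&
    flowNodeNames.some((name) => uploadProcessingNodes.includes(name))
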
@@ -1653,6 +1652,8 @@ export class App {
         const newChatMessage = new ChatMessage()
         Object.assign(newChatMessage, chatMessage)

+        if (!newChatMessage.createdDate) newChatMessage.createdDate = new Date()
+
         const chatmessage = this.AppDataSource.getRepository(ChatMessage).create(newChatMessage)
         return await this.AppDataSource.getRepository(ChatMessage).save(chatmessage)
     }

@@ -533,11 +533,45 @@ export const getVariableValue = (
            variableDict[`{{${variableFullPath}}}`] = handleEscapeCharacters(convertChatHistoryToText(chatHistory), false)
        }

-        // Split by first occurrence of '.' to get just nodeId
-        const [variableNodeId, _] = variableFullPath.split('.')
+        // Resolve values with following case.
+        // 1: <variableNodeId>.data.instance
+        // 2: <variableNodeId>.data.instance.pathtokey
+        const variableFullPathParts = variableFullPath.split('.')
+        const variableNodeId = variableFullPathParts[0]
        const executedNode = reactFlowNodes.find((nd) => nd.id === variableNodeId)
        if (executedNode) {
-            const variableValue = get(executedNode.data, 'instance')
+            let variableValue = get(executedNode.data, 'instance')
+
+            // Handle path such as `<variableNodeId>.data.instance.key`
+            if (variableFullPathParts.length > 3) {
+                let variableObj = null
+                switch (typeof variableValue) {
+                    case 'string': {
+                        const unEscapedVariableValue = handleEscapeCharacters(variableValue, true)
+                        if (unEscapedVariableValue.startsWith('{') && unEscapedVariableValue.endsWith('}')) {
+                            try {
+                                variableObj = JSON.parse(unEscapedVariableValue)
+                            } catch (e) {
+                                // ignore
+                            }
+                        }
+                        break
+                    }
+                    case 'object': {
+                        variableObj = variableValue
+                        break
+                    }
+                    default:
+                        break
+                }
+                if (variableObj) {
+                    variableObj = get(variableObj, variableFullPathParts.slice(3))
+                    variableValue = handleEscapeCharacters(
+                        typeof variableObj === 'object' ? JSON.stringify(variableObj) : variableObj,
+                        false
+                    )
+                }
+            }
            if (isAcceptVariable) {
                variableDict[`{{${variableFullPath}}}`] = variableValue
            } else {

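A self-contained sketch of the new deep-path resolution (sample data invented; the escape-character round-trip from the patch is simplified away). Everything after `<nodeId>.data.instance` is treated as a lodash path into the node's output:

import { get } from 'lodash'

// Resolve `{{customTool_0.data.instance.result.answer}}`; values are invented.
const variableFullPath = 'customTool_0.data.instance.result.answer'
const parts = variableFullPath.split('.')

// Suppose the executed node's instance value is a JSON string
let variableValue: unknown = '{"result":{"answer":"42"}}'

if (parts.length > 3 && typeof variableValue === 'string') {
    // Path segments beyond `<nodeId>.data.instance` index into the parsed object
    const resolved = get(JSON.parse(variableValue), parts.slice(3))
    variableValue = typeof resolved === 'object' ? JSON.stringify(resolved) : resolved
}
// variableValue === '42'
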
@@ -28,7 +28,7 @@ const FormatPromptValuesDialog = ({ show, dialogProps, onChange, onCancel }) =>
            aria-describedby='alert-dialog-description'
        >
            <DialogTitle sx={{ fontSize: '1rem' }} id='alert-dialog-title'>
-                Format Prompt Values
+                {dialogProps.inputParam.label ?? 'Format Prompt Values'}
            </DialogTitle>
            <DialogContent>
                <PerfectScrollbar

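Worth noting about the fallback choice (a generic sketch, not Flowise code): ?? falls back only when the label is null or undefined, so an intentionally empty-string label would still be rendered, unlike ||:

// Generic sketch of the nullish-coalescing fallback used above.
const label: string | undefined = undefined
const title = label ?? 'Format Prompt Values' // -> 'Format Prompt Values'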