Add input moderation for all chains and agents

This commit is contained in:
Octavian FlowiseAI 2024-03-09 23:19:39 +01:00
parent 6eab5cf681
commit 69e082e29f
41 changed files with 711 additions and 61 deletions

View File

@ -6,6 +6,8 @@ import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '..
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core' import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class Airtable_Agents implements INode { class Airtable_Agents implements INode {
label: string label: string
@ -22,7 +24,7 @@ class Airtable_Agents implements INode {
constructor() { constructor() {
this.label = 'Airtable Agent' this.label = 'Airtable Agent'
this.name = 'airtableAgent' this.name = 'airtableAgent'
this.version = 1.0 this.version = 2.0
this.type = 'AgentExecutor' this.type = 'AgentExecutor'
this.category = 'Agents' this.category = 'Agents'
this.icon = 'airtable.svg' this.icon = 'airtable.svg'
@ -71,6 +73,14 @@ class Airtable_Agents implements INode {
default: 100, default: 100,
additionalParams: true, additionalParams: true,
description: 'Number of results to return' description: 'Number of results to return'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -80,12 +90,24 @@ class Airtable_Agents implements INode {
return undefined return undefined
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const model = nodeData.inputs?.model as BaseLanguageModel const model = nodeData.inputs?.model as BaseLanguageModel
const baseId = nodeData.inputs?.baseId as string const baseId = nodeData.inputs?.baseId as string
const tableId = nodeData.inputs?.tableId as string const tableId = nodeData.inputs?.tableId as string
const returnAll = nodeData.inputs?.returnAll as boolean const returnAll = nodeData.inputs?.returnAll as boolean
const limit = nodeData.inputs?.limit as string const limit = nodeData.inputs?.limit as string
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Airtable agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const credentialData = await getCredentialData(nodeData.credential ?? '', options) const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const accessToken = getCredentialParam('accessToken', credentialData, nodeData) const accessToken = getCredentialParam('accessToken', credentialData, nodeData)

View File

@ -7,6 +7,8 @@ import { PromptTemplate } from '@langchain/core/prompts'
import { AutoGPT } from 'langchain/experimental/autogpt' import { AutoGPT } from 'langchain/experimental/autogpt'
import { LLMChain } from 'langchain/chains' import { LLMChain } from 'langchain/chains'
import { INode, INodeData, INodeParams } from '../../../src/Interface' import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
type ObjectTool = StructuredTool type ObjectTool = StructuredTool
const FINISH_NAME = 'finish' const FINISH_NAME = 'finish'
@ -25,7 +27,7 @@ class AutoGPT_Agents implements INode {
constructor() { constructor() {
this.label = 'AutoGPT' this.label = 'AutoGPT'
this.name = 'autoGPT' this.name = 'autoGPT'
this.version = 1.0 this.version = 2.0
this.type = 'AutoGPT' this.type = 'AutoGPT'
this.category = 'Agents' this.category = 'Agents'
this.icon = 'autogpt.svg' this.icon = 'autogpt.svg'
@ -68,6 +70,14 @@ class AutoGPT_Agents implements INode {
type: 'number', type: 'number',
default: 5, default: 5,
optional: true optional: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -92,9 +102,21 @@ class AutoGPT_Agents implements INode {
return autogpt return autogpt
} }
async run(nodeData: INodeData, input: string): Promise<string> { async run(nodeData: INodeData, input: string): Promise<string | object> {
const executor = nodeData.instance as AutoGPT const executor = nodeData.instance as AutoGPT
const model = nodeData.inputs?.model as BaseChatModel const model = nodeData.inputs?.model as BaseChatModel
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the AutoGPT agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
try { try {
let totalAssistantReply = '' let totalAssistantReply = ''

View File

@ -2,6 +2,8 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { VectorStore } from '@langchain/core/vectorstores' import { VectorStore } from '@langchain/core/vectorstores'
import { INode, INodeData, INodeParams } from '../../../src/Interface' import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { BabyAGI } from './core' import { BabyAGI } from './core'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class BabyAGI_Agents implements INode { class BabyAGI_Agents implements INode {
label: string label: string
@ -17,7 +19,7 @@ class BabyAGI_Agents implements INode {
constructor() { constructor() {
this.label = 'BabyAGI' this.label = 'BabyAGI'
this.name = 'babyAGI' this.name = 'babyAGI'
this.version = 1.0 this.version = 2.0
this.type = 'BabyAGI' this.type = 'BabyAGI'
this.category = 'Agents' this.category = 'Agents'
this.icon = 'babyagi.svg' this.icon = 'babyagi.svg'
@ -39,6 +41,14 @@ class BabyAGI_Agents implements INode {
name: 'taskLoop', name: 'taskLoop',
type: 'number', type: 'number',
default: 3 default: 3
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -53,8 +63,21 @@ class BabyAGI_Agents implements INode {
return babyAgi return babyAgi
} }
async run(nodeData: INodeData, input: string): Promise<string> { async run(nodeData: INodeData, input: string): Promise<string | object> {
const executor = nodeData.instance as BabyAGI const executor = nodeData.instance as BabyAGI
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the BabyAGI agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const objective = input const objective = input
const res = await executor.call({ objective }) const res = await executor.call({ objective })

View File

@ -5,6 +5,8 @@ import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from
import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '../../../src/Interface' import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils' import { getBaseClasses } from '../../../src/utils'
import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core' import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class CSV_Agents implements INode { class CSV_Agents implements INode {
label: string label: string
@ -20,7 +22,7 @@ class CSV_Agents implements INode {
constructor() { constructor() {
this.label = 'CSV Agent' this.label = 'CSV Agent'
this.name = 'csvAgent' this.name = 'csvAgent'
this.version = 1.0 this.version = 2.0
this.type = 'AgentExecutor' this.type = 'AgentExecutor'
this.category = 'Agents' this.category = 'Agents'
this.icon = 'CSVagent.svg' this.icon = 'CSVagent.svg'
@ -47,6 +49,14 @@ class CSV_Agents implements INode {
optional: true, optional: true,
placeholder: placeholder:
'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.' 'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -56,10 +66,22 @@ class CSV_Agents implements INode {
return undefined return undefined
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const csvFileBase64 = nodeData.inputs?.csvFile as string const csvFileBase64 = nodeData.inputs?.csvFile as string
const model = nodeData.inputs?.model as BaseLanguageModel const model = nodeData.inputs?.model as BaseLanguageModel
const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the CSV agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const loggerHandler = new ConsoleCallbackHandler(options.logger) const loggerHandler = new ConsoleCallbackHandler(options.logger)
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId) const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)

View File

@ -13,6 +13,8 @@ import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams }
import { AgentExecutor } from '../../../src/agents' import { AgentExecutor } from '../../../src/agents'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages } from '../../../src/multiModalUtils' import { addImagesToMessages } from '../../../src/multiModalUtils'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI. const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
@ -46,7 +48,7 @@ class ConversationalAgent_Agents implements INode {
constructor(fields?: { sessionId?: string }) { constructor(fields?: { sessionId?: string }) {
this.label = 'Conversational Agent' this.label = 'Conversational Agent'
this.name = 'conversationalAgent' this.name = 'conversationalAgent'
this.version = 2.0 this.version = 3.0
this.type = 'AgentExecutor' this.type = 'AgentExecutor'
this.category = 'Agents' this.category = 'Agents'
this.icon = 'agent.svg' this.icon = 'agent.svg'
@ -77,6 +79,14 @@ class ConversationalAgent_Agents implements INode {
default: DEFAULT_PREFIX, default: DEFAULT_PREFIX,
optional: true, optional: true,
additionalParams: true additionalParams: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
this.sessionId = fields?.sessionId this.sessionId = fields?.sessionId
@ -86,9 +96,20 @@ class ConversationalAgent_Agents implements INode {
return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const memory = nodeData.inputs?.memory as FlowiseMemory const memory = nodeData.inputs?.memory as FlowiseMemory
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Conversational Agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const executor = await prepareAgent( const executor = await prepareAgent(
nodeData, nodeData,
options, options,

View File

@ -10,6 +10,8 @@ import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams }
import { getBaseClasses } from '../../../src/utils' import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor, formatAgentSteps } from '../../../src/agents' import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
const defaultMessage = `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.` const defaultMessage = `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.`
@ -28,7 +30,7 @@ class ConversationalRetrievalAgent_Agents implements INode {
constructor(fields?: { sessionId?: string }) { constructor(fields?: { sessionId?: string }) {
this.label = 'Conversational Retrieval Agent' this.label = 'Conversational Retrieval Agent'
this.name = 'conversationalRetrievalAgent' this.name = 'conversationalRetrievalAgent'
this.version = 3.0 this.version = 4.0
this.type = 'AgentExecutor' this.type = 'AgentExecutor'
this.category = 'Agents' this.category = 'Agents'
this.icon = 'agent.svg' this.icon = 'agent.svg'
@ -59,6 +61,14 @@ class ConversationalRetrievalAgent_Agents implements INode {
rows: 4, rows: 4,
optional: true, optional: true,
additionalParams: true additionalParams: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
this.sessionId = fields?.sessionId this.sessionId = fields?.sessionId
@ -68,8 +78,21 @@ class ConversationalRetrievalAgent_Agents implements INode {
return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const memory = nodeData.inputs?.memory as FlowiseMemory const memory = nodeData.inputs?.memory as FlowiseMemory
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Conversational Retrieval Agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
const loggerHandler = new ConsoleCallbackHandler(options.logger) const loggerHandler = new ConsoleCallbackHandler(options.logger)

View File

@ -12,6 +12,8 @@ import { getBaseClasses } from '../../../src/utils'
import { createReactAgent } from '../../../src/agents' import { createReactAgent } from '../../../src/agents'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages } from '../../../src/multiModalUtils' import { addImagesToMessages } from '../../../src/multiModalUtils'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class MRKLAgentChat_Agents implements INode { class MRKLAgentChat_Agents implements INode {
label: string label: string
@ -28,7 +30,7 @@ class MRKLAgentChat_Agents implements INode {
constructor(fields?: { sessionId?: string }) { constructor(fields?: { sessionId?: string }) {
this.label = 'ReAct Agent for Chat Models' this.label = 'ReAct Agent for Chat Models'
this.name = 'mrklAgentChat' this.name = 'mrklAgentChat'
this.version = 3.0 this.version = 4.0
this.type = 'AgentExecutor' this.type = 'AgentExecutor'
this.category = 'Agents' this.category = 'Agents'
this.icon = 'agent.svg' this.icon = 'agent.svg'
@ -50,6 +52,14 @@ class MRKLAgentChat_Agents implements INode {
label: 'Memory', label: 'Memory',
name: 'memory', name: 'memory',
type: 'BaseChatMemory' type: 'BaseChatMemory'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
this.sessionId = fields?.sessionId this.sessionId = fields?.sessionId
@ -59,10 +69,22 @@ class MRKLAgentChat_Agents implements INode {
return null return null
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const memory = nodeData.inputs?.memory as FlowiseMemory const memory = nodeData.inputs?.memory as FlowiseMemory
const model = nodeData.inputs?.model as BaseChatModel const model = nodeData.inputs?.model as BaseChatModel
let tools = nodeData.inputs?.tools as Tool[] let tools = nodeData.inputs?.tools as Tool[]
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the ReAct Agent for Chat Models
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
tools = flatten(tools) tools = flatten(tools)
const prompt = await pull<PromptTemplate>('hwchase17/react-chat') const prompt = await pull<PromptTemplate>('hwchase17/react-chat')

View File

@ -8,6 +8,8 @@ import { additionalCallbacks } from '../../../src/handler'
import { getBaseClasses } from '../../../src/utils' import { getBaseClasses } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { createReactAgent } from '../../../src/agents' import { createReactAgent } from '../../../src/agents'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class MRKLAgentLLM_Agents implements INode { class MRKLAgentLLM_Agents implements INode {
label: string label: string
@ -23,7 +25,7 @@ class MRKLAgentLLM_Agents implements INode {
constructor() { constructor() {
this.label = 'ReAct Agent for LLMs' this.label = 'ReAct Agent for LLMs'
this.name = 'mrklAgentLLM' this.name = 'mrklAgentLLM'
this.version = 1.0 this.version = 2.0
this.type = 'AgentExecutor' this.type = 'AgentExecutor'
this.category = 'Agents' this.category = 'Agents'
this.icon = 'agent.svg' this.icon = 'agent.svg'
@ -40,6 +42,14 @@ class MRKLAgentLLM_Agents implements INode {
label: 'Language Model', label: 'Language Model',
name: 'model', name: 'model',
type: 'BaseLanguageModel' type: 'BaseLanguageModel'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -48,9 +58,22 @@ class MRKLAgentLLM_Agents implements INode {
return null return null
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const model = nodeData.inputs?.model as BaseLanguageModel const model = nodeData.inputs?.model as BaseLanguageModel
let tools = nodeData.inputs?.tools as Tool[] let tools = nodeData.inputs?.tools as Tool[]
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the ReAct Agent for LLMs
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
tools = flatten(tools) tools = flatten(tools)
const prompt = await pull<PromptTemplate>('hwchase17/react') const prompt = await pull<PromptTemplate>('hwchase17/react')

View File

@ -10,6 +10,8 @@ import { getBaseClasses } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor, formatAgentSteps } from '../../../src/agents' import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
import { Moderation, checkInputs } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class OpenAIFunctionAgent_Agents implements INode { class OpenAIFunctionAgent_Agents implements INode {
label: string label: string
@ -26,7 +28,7 @@ class OpenAIFunctionAgent_Agents implements INode {
constructor(fields?: { sessionId?: string }) { constructor(fields?: { sessionId?: string }) {
this.label = 'OpenAI Function Agent' this.label = 'OpenAI Function Agent'
this.name = 'openAIFunctionAgent' this.name = 'openAIFunctionAgent'
this.version = 3.0 this.version = 4.0
this.type = 'AgentExecutor' this.type = 'AgentExecutor'
this.category = 'Agents' this.category = 'Agents'
this.icon = 'function.svg' this.icon = 'function.svg'
@ -56,6 +58,14 @@ class OpenAIFunctionAgent_Agents implements INode {
rows: 4, rows: 4,
optional: true, optional: true,
additionalParams: true additionalParams: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
this.sessionId = fields?.sessionId this.sessionId = fields?.sessionId
@ -67,6 +77,19 @@ class OpenAIFunctionAgent_Agents implements INode {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const memory = nodeData.inputs?.memory as FlowiseMemory const memory = nodeData.inputs?.memory as FlowiseMemory
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the OpenAI Function Agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
const loggerHandler = new ConsoleCallbackHandler(options.logger) const loggerHandler = new ConsoleCallbackHandler(options.logger)

View File

@ -11,7 +11,8 @@ import { getBaseClasses } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor } from '../../../src/agents' import { AgentExecutor } from '../../../src/agents'
//import { AgentExecutor } from "langchain/agents"; import { Moderation, checkInputs } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
const defaultSystemMessage = `You are a helpful assistant. Help the user answer any questions. const defaultSystemMessage = `You are a helpful assistant. Help the user answer any questions.
@ -52,7 +53,7 @@ class XMLAgent_Agents implements INode {
constructor(fields?: { sessionId?: string }) { constructor(fields?: { sessionId?: string }) {
this.label = 'XML Agent' this.label = 'XML Agent'
this.name = 'xmlAgent' this.name = 'xmlAgent'
this.version = 1.0 this.version = 2.0
this.type = 'XMLAgent' this.type = 'XMLAgent'
this.category = 'Agents' this.category = 'Agents'
this.icon = 'xmlagent.svg' this.icon = 'xmlagent.svg'
@ -83,6 +84,14 @@ class XMLAgent_Agents implements INode {
rows: 4, rows: 4,
default: defaultSystemMessage, default: defaultSystemMessage,
additionalParams: true additionalParams: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
this.sessionId = fields?.sessionId this.sessionId = fields?.sessionId
@ -94,6 +103,18 @@ class XMLAgent_Agents implements INode {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const memory = nodeData.inputs?.memory as FlowiseMemory const memory = nodeData.inputs?.memory as FlowiseMemory
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the XML Agent
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
const loggerHandler = new ConsoleCallbackHandler(options.logger) const loggerHandler = new ConsoleCallbackHandler(options.logger)

View File

@ -3,6 +3,8 @@ import { APIChain, createOpenAPIChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils' import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class OpenApiChain_Chains implements INode { class OpenApiChain_Chains implements INode {
label: string label: string
@ -18,7 +20,7 @@ class OpenApiChain_Chains implements INode {
constructor() { constructor() {
this.label = 'OpenAPI Chain' this.label = 'OpenAPI Chain'
this.name = 'openApiChain' this.name = 'openApiChain'
this.version = 1.0 this.version = 2.0
this.type = 'OpenAPIChain' this.type = 'OpenAPIChain'
this.icon = 'openapi.svg' this.icon = 'openapi.svg'
this.category = 'Chains' this.category = 'Chains'
@ -50,6 +52,14 @@ class OpenApiChain_Chains implements INode {
type: 'json', type: 'json',
additionalParams: true, additionalParams: true,
optional: true optional: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -58,11 +68,21 @@ class OpenApiChain_Chains implements INode {
return await initChain(nodeData) return await initChain(nodeData)
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const chain = await initChain(nodeData) const chain = await initChain(nodeData)
const loggerHandler = new ConsoleCallbackHandler(options.logger) const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options) const callbacks = await additionalCallbacks(nodeData, options)
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the OpenAPI chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
if (options.socketIO && options.socketIOClientId) { if (options.socketIO && options.socketIOClientId) {
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId) const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
const res = await chain.run(input, [loggerHandler, handler, ...callbacks]) const res = await chain.run(input, [loggerHandler, handler, ...callbacks])

View File

@ -5,6 +5,8 @@ import { PromptTemplate, ChatPromptTemplate, MessagesPlaceholder } from '@langch
import { Runnable, RunnableSequence, RunnableMap, RunnableBranch, RunnableLambda } from '@langchain/core/runnables' import { Runnable, RunnableSequence, RunnableMap, RunnableBranch, RunnableLambda } from '@langchain/core/runnables'
import { BaseMessage, HumanMessage, AIMessage } from '@langchain/core/messages' import { BaseMessage, HumanMessage, AIMessage } from '@langchain/core/messages'
import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console' import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { StringOutputParser } from '@langchain/core/output_parsers' import { StringOutputParser } from '@langchain/core/output_parsers'
import type { Document } from '@langchain/core/documents' import type { Document } from '@langchain/core/documents'
import { BufferMemoryInput } from 'langchain/memory' import { BufferMemoryInput } from 'langchain/memory'
@ -36,7 +38,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
constructor(fields?: { sessionId?: string }) { constructor(fields?: { sessionId?: string }) {
this.label = 'Conversational Retrieval QA Chain' this.label = 'Conversational Retrieval QA Chain'
this.name = 'conversationalRetrievalQAChain' this.name = 'conversationalRetrievalQAChain'
this.version = 2.0 this.version = 3.0
this.type = 'ConversationalRetrievalQAChain' this.type = 'ConversationalRetrievalQAChain'
this.icon = 'qa.svg' this.icon = 'qa.svg'
this.category = 'Chains' this.category = 'Chains'
@ -87,6 +89,14 @@ class ConversationalRetrievalQAChain_Chains implements INode {
additionalParams: true, additionalParams: true,
optional: true, optional: true,
default: RESPONSE_TEMPLATE default: RESPONSE_TEMPLATE
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
/** Deprecated /** Deprecated
{ {
@ -163,6 +173,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
} }
let memory: FlowiseMemory | undefined = externalMemory let memory: FlowiseMemory | undefined = externalMemory
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (!memory) { if (!memory) {
memory = new BufferMemory({ memory = new BufferMemory({
returnMessages: true, returnMessages: true,
@ -171,6 +182,16 @@ class ConversationalRetrievalQAChain_Chains implements INode {
}) })
} }
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Conversational Retrieval QA Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt) const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt)
const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? [] const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? []

View File

@ -3,6 +3,8 @@ import { MultiPromptChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeParams, PromptRetriever } from '../../../src/Interface' import { ICommonObject, INode, INodeData, INodeParams, PromptRetriever } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils' import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class MultiPromptChain_Chains implements INode { class MultiPromptChain_Chains implements INode {
label: string label: string
@ -18,7 +20,7 @@ class MultiPromptChain_Chains implements INode {
constructor() { constructor() {
this.label = 'Multi Prompt Chain' this.label = 'Multi Prompt Chain'
this.name = 'multiPromptChain' this.name = 'multiPromptChain'
this.version = 1.0 this.version = 2.0
this.type = 'MultiPromptChain' this.type = 'MultiPromptChain'
this.icon = 'prompt.svg' this.icon = 'prompt.svg'
this.category = 'Chains' this.category = 'Chains'
@ -35,6 +37,14 @@ class MultiPromptChain_Chains implements INode {
name: 'promptRetriever', name: 'promptRetriever',
type: 'PromptRetriever', type: 'PromptRetriever',
list: true list: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -62,8 +72,19 @@ class MultiPromptChain_Chains implements INode {
return chain return chain
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const chain = nodeData.instance as MultiPromptChain const chain = nodeData.instance as MultiPromptChain
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Multi Prompt Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const obj = { input } const obj = { input }
const loggerHandler = new ConsoleCallbackHandler(options.logger) const loggerHandler = new ConsoleCallbackHandler(options.logger)

View File

@ -3,6 +3,8 @@ import { MultiRetrievalQAChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeParams, VectorStoreRetriever } from '../../../src/Interface' import { ICommonObject, INode, INodeData, INodeParams, VectorStoreRetriever } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils' import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class MultiRetrievalQAChain_Chains implements INode { class MultiRetrievalQAChain_Chains implements INode {
label: string label: string
@ -18,7 +20,7 @@ class MultiRetrievalQAChain_Chains implements INode {
constructor() { constructor() {
this.label = 'Multi Retrieval QA Chain' this.label = 'Multi Retrieval QA Chain'
this.name = 'multiRetrievalQAChain' this.name = 'multiRetrievalQAChain'
this.version = 1.0 this.version = 2.0
this.type = 'MultiRetrievalQAChain' this.type = 'MultiRetrievalQAChain'
this.icon = 'qa.svg' this.icon = 'qa.svg'
this.category = 'Chains' this.category = 'Chains'
@ -41,6 +43,14 @@ class MultiRetrievalQAChain_Chains implements INode {
name: 'returnSourceDocuments', name: 'returnSourceDocuments',
type: 'boolean', type: 'boolean',
optional: true optional: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -72,7 +82,17 @@ class MultiRetrievalQAChain_Chains implements INode {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const chain = nodeData.instance as MultiRetrievalQAChain const chain = nodeData.instance as MultiRetrievalQAChain
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Multi Retrieval QA Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const obj = { input } const obj = { input }
const loggerHandler = new ConsoleCallbackHandler(options.logger) const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options) const callbacks = await additionalCallbacks(nodeData, options)

View File

@ -4,6 +4,8 @@ import { RetrievalQAChain } from 'langchain/chains'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils' import { getBaseClasses } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class RetrievalQAChain_Chains implements INode { class RetrievalQAChain_Chains implements INode {
label: string label: string
@ -19,7 +21,7 @@ class RetrievalQAChain_Chains implements INode {
constructor() { constructor() {
this.label = 'Retrieval QA Chain' this.label = 'Retrieval QA Chain'
this.name = 'retrievalQAChain' this.name = 'retrievalQAChain'
this.version = 1.0 this.version = 2.0
this.type = 'RetrievalQAChain' this.type = 'RetrievalQAChain'
this.icon = 'qa.svg' this.icon = 'qa.svg'
this.category = 'Chains' this.category = 'Chains'
@ -35,6 +37,14 @@ class RetrievalQAChain_Chains implements INode {
label: 'Vector Store Retriever', label: 'Vector Store Retriever',
name: 'vectorStoreRetriever', name: 'vectorStoreRetriever',
type: 'BaseRetriever' type: 'BaseRetriever'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -47,8 +57,19 @@ class RetrievalQAChain_Chains implements INode {
return chain return chain
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const chain = nodeData.instance as RetrievalQAChain const chain = nodeData.instance as RetrievalQAChain
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Retrieval QA Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const obj = { const obj = {
query: input query: input
} }

View File

@ -7,6 +7,8 @@ import { SqlDatabase } from 'langchain/sql_db'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { getBaseClasses, getInputVariables } from '../../../src/utils' import { getBaseClasses, getInputVariables } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
type DatabaseType = 'sqlite' | 'postgres' | 'mssql' | 'mysql' type DatabaseType = 'sqlite' | 'postgres' | 'mssql' | 'mysql'
@ -24,7 +26,7 @@ class SqlDatabaseChain_Chains implements INode {
constructor() { constructor() {
this.label = 'Sql Database Chain' this.label = 'Sql Database Chain'
this.name = 'sqlDatabaseChain' this.name = 'sqlDatabaseChain'
this.version = 4.0 this.version = 5.0
this.type = 'SqlDatabaseChain' this.type = 'SqlDatabaseChain'
this.icon = 'sqlchain.svg' this.icon = 'sqlchain.svg'
this.category = 'Chains' this.category = 'Chains'
@ -115,6 +117,14 @@ class SqlDatabaseChain_Chains implements INode {
placeholder: DEFAULT_SQL_DATABASE_PROMPT.template + DEFAULT_SQL_DATABASE_PROMPT.templateFormat, placeholder: DEFAULT_SQL_DATABASE_PROMPT.template + DEFAULT_SQL_DATABASE_PROMPT.templateFormat,
additionalParams: true, additionalParams: true,
optional: true optional: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -144,7 +154,7 @@ class SqlDatabaseChain_Chains implements INode {
return chain return chain
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const databaseType = nodeData.inputs?.database as DatabaseType const databaseType = nodeData.inputs?.database as DatabaseType
const model = nodeData.inputs?.model as BaseLanguageModel const model = nodeData.inputs?.model as BaseLanguageModel
const url = nodeData.inputs?.url as string const url = nodeData.inputs?.url as string
@ -155,6 +165,17 @@ class SqlDatabaseChain_Chains implements INode {
const sampleRowsInTableInfo = nodeData.inputs?.sampleRowsInTableInfo as number const sampleRowsInTableInfo = nodeData.inputs?.sampleRowsInTableInfo as number
const topK = nodeData.inputs?.topK as number const topK = nodeData.inputs?.topK as number
const customPrompt = nodeData.inputs?.customPrompt as string const customPrompt = nodeData.inputs?.customPrompt as string
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Sql Database Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const chain = await getSQLDBChain( const chain = await getSQLDBChain(
databaseType, databaseType,

View File

@ -4,6 +4,8 @@ import { VectaraStore } from '@langchain/community/vectorstores/vectara'
import { VectorDBQAChain } from 'langchain/chains' import { VectorDBQAChain } from 'langchain/chains'
import { INode, INodeData, INodeParams } from '../../../src/Interface' import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils' import { getBaseClasses } from '../../../src/utils'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
// functionality based on https://github.com/vectara/vectara-answer // functionality based on https://github.com/vectara/vectara-answer
const reorderCitations = (unorderedSummary: string) => { const reorderCitations = (unorderedSummary: string) => {
@ -48,7 +50,7 @@ class VectaraChain_Chains implements INode {
constructor() { constructor() {
this.label = 'Vectara QA Chain' this.label = 'Vectara QA Chain'
this.name = 'vectaraQAChain' this.name = 'vectaraQAChain'
this.version = 1.0 this.version = 2.0
this.type = 'VectaraQAChain' this.type = 'VectaraQAChain'
this.icon = 'vectara.png' this.icon = 'vectara.png'
this.category = 'Chains' this.category = 'Chains'
@ -219,6 +221,14 @@ class VectaraChain_Chains implements INode {
description: 'Maximum results used to build the summarized response', description: 'Maximum results used to build the summarized response',
type: 'number', type: 'number',
default: 7 default: 7
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -227,7 +237,7 @@ class VectaraChain_Chains implements INode {
return null return null
} }
async run(nodeData: INodeData, input: string): Promise<object> { async run(nodeData: INodeData, input: string): Promise<string | object> {
const vectorStore = nodeData.inputs?.vectaraStore as VectaraStore const vectorStore = nodeData.inputs?.vectaraStore as VectaraStore
const responseLang = (nodeData.inputs?.responseLang as string) ?? 'eng' const responseLang = (nodeData.inputs?.responseLang as string) ?? 'eng'
const summarizerPromptName = nodeData.inputs?.summarizerPromptName as string const summarizerPromptName = nodeData.inputs?.summarizerPromptName as string
@ -252,6 +262,18 @@ class VectaraChain_Chains implements INode {
const mmrRerankerId = 272725718 const mmrRerankerId = 272725718
const mmrEnabled = vectaraFilter?.mmrConfig?.enabled const mmrEnabled = vectaraFilter?.mmrConfig?.enabled
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the Vectara chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const data = { const data = {
query: [ query: [
{ {

View File

@ -4,6 +4,8 @@ import { VectorDBQAChain } from 'langchain/chains'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils' import { getBaseClasses } from '../../../src/utils'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
class VectorDBQAChain_Chains implements INode { class VectorDBQAChain_Chains implements INode {
label: string label: string
@ -19,7 +21,7 @@ class VectorDBQAChain_Chains implements INode {
constructor() { constructor() {
this.label = 'VectorDB QA Chain' this.label = 'VectorDB QA Chain'
this.name = 'vectorDBQAChain' this.name = 'vectorDBQAChain'
this.version = 1.0 this.version = 2.0
this.type = 'VectorDBQAChain' this.type = 'VectorDBQAChain'
this.icon = 'vectordb.svg' this.icon = 'vectordb.svg'
this.category = 'Chains' this.category = 'Chains'
@ -35,6 +37,14 @@ class VectorDBQAChain_Chains implements INode {
label: 'Vector Store', label: 'Vector Store',
name: 'vectorStore', name: 'vectorStore',
type: 'VectorStore' type: 'VectorStore'
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
} }
] ]
} }
@ -50,8 +60,20 @@ class VectorDBQAChain_Chains implements INode {
return chain return chain
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const chain = nodeData.instance as VectorDBQAChain const chain = nodeData.instance as VectorDBQAChain
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the VectorDB QA Chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
//streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const obj = { const obj = {
query: input query: input
} }

View File

@ -15,7 +15,7 @@
"data": { "data": {
"id": "openApiChain_1", "id": "openApiChain_1",
"label": "OpenAPI Chain", "label": "OpenAPI Chain",
"version": 1, "version": 2,
"name": "openApiChain", "name": "openApiChain",
"type": "OpenAPIChain", "type": "OpenAPIChain",
"baseClasses": ["OpenAPIChain", "BaseChain"], "baseClasses": ["OpenAPIChain", "BaseChain"],
@ -53,9 +53,19 @@
"name": "model", "name": "model",
"type": "ChatOpenAI", "type": "ChatOpenAI",
"id": "openApiChain_1-input-model-ChatOpenAI" "id": "openApiChain_1-input-model-ChatOpenAI"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "openApiChain_1-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_1.data.instance}}", "model": "{{chatOpenAI_1.data.instance}}",
"yamlLink": "https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml", "yamlLink": "https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml",
"headers": "" "headers": ""
@ -399,7 +409,7 @@
"id": "openAIFunctionAgent_0", "id": "openAIFunctionAgent_0",
"label": "OpenAI Function Agent", "label": "OpenAI Function Agent",
"name": "openAIFunctionAgent", "name": "openAIFunctionAgent",
"version": 3, "version": 4,
"type": "AgentExecutor", "type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain"], "baseClasses": ["AgentExecutor", "BaseChain"],
"category": "Agents", "category": "Agents",
@ -434,9 +444,19 @@
"name": "model", "name": "model",
"type": "BaseChatModel", "type": "BaseChatModel",
"id": "openAIFunctionAgent_0-input-model-BaseChatModel" "id": "openAIFunctionAgent_0-input-model-BaseChatModel"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "openAIFunctionAgent_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"tools": ["{{chainTool_0.data.instance}}"], "tools": ["{{chainTool_0.data.instance}}"],
"memory": "{{bufferMemory_0.data.instance}}", "memory": "{{bufferMemory_0.data.instance}}",
"model": "{{chatOpenAI_2.data.instance}}", "model": "{{chatOpenAI_2.data.instance}}",

View File

@ -1100,7 +1100,7 @@
"data": { "data": {
"id": "conversationalAgent_0", "id": "conversationalAgent_0",
"label": "Conversational Agent", "label": "Conversational Agent",
"version": 2, "version": 3,
"name": "conversationalAgent", "name": "conversationalAgent",
"type": "AgentExecutor", "type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
@ -1137,9 +1137,19 @@
"name": "memory", "name": "memory",
"type": "BaseChatMemory", "type": "BaseChatMemory",
"id": "conversationalAgent_0-input-memory-BaseChatMemory" "id": "conversationalAgent_0-input-memory-BaseChatMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalAgent_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"tools": ["{{chainTool_0.data.instance}}", "{{chainTool_1.data.instance}}"], "tools": ["{{chainTool_0.data.instance}}", "{{chainTool_1.data.instance}}"],
"model": "{{chatOpenAI_3.data.instance}}", "model": "{{chatOpenAI_3.data.instance}}",
"memory": "{{bufferMemory_0.data.instance}}", "memory": "{{bufferMemory_0.data.instance}}",

View File

@ -15,7 +15,7 @@
"data": { "data": {
"id": "autoGPT_0", "id": "autoGPT_0",
"label": "AutoGPT", "label": "AutoGPT",
"version": 1, "version": 2,
"name": "autoGPT", "name": "autoGPT",
"type": "AutoGPT", "type": "AutoGPT",
"baseClasses": ["AutoGPT"], "baseClasses": ["AutoGPT"],
@ -66,9 +66,19 @@
"name": "vectorStoreRetriever", "name": "vectorStoreRetriever",
"type": "BaseRetriever", "type": "BaseRetriever",
"id": "autoGPT_0-input-vectorStoreRetriever-BaseRetriever" "id": "autoGPT_0-input-vectorStoreRetriever-BaseRetriever"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "autoGPT_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"tools": ["{{readFile_0.data.instance}}", "{{writeFile_1.data.instance}}", "{{serpAPI_0.data.instance}}"], "tools": ["{{readFile_0.data.instance}}", "{{writeFile_1.data.instance}}", "{{serpAPI_0.data.instance}}"],
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{pinecone_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}",

View File

@ -15,7 +15,7 @@
"data": { "data": {
"id": "babyAGI_1", "id": "babyAGI_1",
"label": "BabyAGI", "label": "BabyAGI",
"version": 1, "version": 2,
"name": "babyAGI", "name": "babyAGI",
"type": "BabyAGI", "type": "BabyAGI",
"baseClasses": ["BabyAGI"], "baseClasses": ["BabyAGI"],
@ -42,9 +42,19 @@
"name": "vectorStore", "name": "vectorStore",
"type": "VectorStore", "type": "VectorStore",
"id": "babyAGI_1-input-vectorStore-VectorStore" "id": "babyAGI_1-input-vectorStore-VectorStore"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "babyAGI_1-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStore": "{{pinecone_0.data.instance}}", "vectorStore": "{{pinecone_0.data.instance}}",
"taskLoop": 3 "taskLoop": 3

View File

@ -16,7 +16,7 @@
"id": "csvAgent_0", "id": "csvAgent_0",
"label": "CSV Agent", "label": "CSV Agent",
"name": "csvAgent", "name": "csvAgent",
"version": 1, "version": 2,
"type": "AgentExecutor", "type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain"], "baseClasses": ["AgentExecutor", "BaseChain"],
"category": "Agents", "category": "Agents",
@ -36,9 +36,19 @@
"name": "model", "name": "model",
"type": "BaseLanguageModel", "type": "BaseLanguageModel",
"id": "csvAgent_0-input-model-BaseLanguageModel" "id": "csvAgent_0-input-model-BaseLanguageModel"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "csvAgent_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}" "model": "{{chatOpenAI_0.data.instance}}"
}, },
"outputAnchors": [ "outputAnchors": [

View File

@ -15,7 +15,7 @@
"data": { "data": {
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"version": 2, "version": 3,
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
@ -74,9 +74,19 @@
"optional": true, "optional": true,
"description": "If left empty, a default BufferMemory will be used", "description": "If left empty, a default BufferMemory will be used",
"id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}", "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}",
"memory": "", "memory": "",

View File

@ -451,7 +451,7 @@
"id": "mrklAgentChat_0", "id": "mrklAgentChat_0",
"label": "MRKL Agent for Chat Models", "label": "MRKL Agent for Chat Models",
"name": "mrklAgentChat", "name": "mrklAgentChat",
"version": 1, "version": 2,
"type": "AgentExecutor", "type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain"], "baseClasses": ["AgentExecutor", "BaseChain"],
"category": "Agents", "category": "Agents",
@ -470,9 +470,19 @@
"name": "model", "name": "model",
"type": "BaseLanguageModel", "type": "BaseLanguageModel",
"id": "mrklAgentChat_0-input-model-BaseLanguageModel" "id": "mrklAgentChat_0-input-model-BaseLanguageModel"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "mrklAgentChat_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"tools": ["{{requestsGet_0.data.instance}}", "{{requestsPost_0.data.instance}}", "{{aiPlugin_0.data.instance}}"], "tools": ["{{requestsGet_0.data.instance}}", "{{requestsPost_0.data.instance}}", "{{aiPlugin_0.data.instance}}"],
"model": "{{chatOpenAI_0.data.instance}}" "model": "{{chatOpenAI_0.data.instance}}"
}, },

View File

@ -392,7 +392,7 @@
"data": { "data": {
"id": "conversationalAgent_0", "id": "conversationalAgent_0",
"label": "Conversational Agent", "label": "Conversational Agent",
"version": 2, "version": 3,
"name": "conversationalAgent", "name": "conversationalAgent",
"type": "AgentExecutor", "type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
@ -429,9 +429,19 @@
"name": "memory", "name": "memory",
"type": "BaseChatMemory", "type": "BaseChatMemory",
"id": "conversationalAgent_0-input-memory-BaseChatMemory" "id": "conversationalAgent_0-input-memory-BaseChatMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalAgent_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"tools": ["{{calculator_1.data.instance}}", "{{serpAPI_0.data.instance}}"], "tools": ["{{calculator_1.data.instance}}", "{{serpAPI_0.data.instance}}"],
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"memory": "{{bufferMemory_1.data.instance}}", "memory": "{{bufferMemory_1.data.instance}}",

View File

@ -123,7 +123,7 @@
"data": { "data": {
"id": "conversationalRetrievalAgent_0", "id": "conversationalRetrievalAgent_0",
"label": "Conversational Retrieval Agent", "label": "Conversational Retrieval Agent",
"version": 3, "version": 4,
"name": "conversationalRetrievalAgent", "name": "conversationalRetrievalAgent",
"type": "AgentExecutor", "type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
@ -159,9 +159,19 @@
"name": "model", "name": "model",
"type": "BaseChatModel", "type": "BaseChatModel",
"id": "conversationalRetrievalAgent_0-input-model-BaseChatModel" "id": "conversationalRetrievalAgent_0-input-model-BaseChatModel"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalRetrievalAgent_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"tools": ["{{retrieverTool_0.data.instance}}"], "tools": ["{{retrieverTool_0.data.instance}}"],
"memory": "{{bufferMemory_0.data.instance}}", "memory": "{{bufferMemory_0.data.instance}}",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",

View File

@ -274,7 +274,7 @@
"data": { "data": {
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"version": 2, "version": 3,
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
@ -333,9 +333,19 @@
"optional": true, "optional": true,
"description": "If left empty, a default BufferMemory will be used", "description": "If left empty, a default BufferMemory will be used",
"id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{pinecone_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}",
"memory": "", "memory": "",

View File

@ -158,7 +158,7 @@
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"version": 2, "version": 3,
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
"category": "Chains", "category": "Chains",
@ -216,9 +216,19 @@
"optional": true, "optional": true,
"description": "If left empty, a default BufferMemory will be used", "description": "If left empty, a default BufferMemory will be used",
"id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}", "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}",
"memory": "", "memory": "",

View File

@ -85,7 +85,7 @@
"data": { "data": {
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"version": 2, "version": 3,
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
@ -144,9 +144,19 @@
"optional": true, "optional": true,
"description": "If left empty, a default BufferMemory will be used", "description": "If left empty, a default BufferMemory will be used",
"id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOllama_0.data.instance}}", "model": "{{chatOllama_0.data.instance}}",
"vectorStoreRetriever": "{{faiss_0.data.instance}}", "vectorStoreRetriever": "{{faiss_0.data.instance}}",
"memory": "", "memory": "",

View File

@ -15,7 +15,7 @@
"data": { "data": {
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"version": 2, "version": 3,
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
@ -74,9 +74,19 @@
"optional": true, "optional": true,
"description": "If left empty, a default BufferMemory will be used", "description": "If left empty, a default BufferMemory will be used",
"id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{qdrant_0.data.instance}}", "vectorStoreRetriever": "{{qdrant_0.data.instance}}",
"memory": "{{ZepMemory_0.data.instance}}", "memory": "{{ZepMemory_0.data.instance}}",

View File

@ -251,7 +251,7 @@
"data": { "data": {
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"version": 2, "version": 3,
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
@ -310,9 +310,19 @@
"optional": true, "optional": true,
"description": "If left empty, a default BufferMemory will be used", "description": "If left empty, a default BufferMemory will be used",
"id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{pinecone_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}",
"rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",

View File

@ -84,7 +84,7 @@
"id": "multiPromptChain_0", "id": "multiPromptChain_0",
"label": "Multi Prompt Chain", "label": "Multi Prompt Chain",
"name": "multiPromptChain", "name": "multiPromptChain",
"version": 1, "version": 2,
"type": "MultiPromptChain", "type": "MultiPromptChain",
"baseClasses": ["MultiPromptChain", "MultiRouteChain", "BaseChain", "BaseLangChain"], "baseClasses": ["MultiPromptChain", "MultiRouteChain", "BaseChain", "BaseLangChain"],
"category": "Chains", "category": "Chains",
@ -103,9 +103,19 @@
"type": "PromptRetriever", "type": "PromptRetriever",
"list": true, "list": true,
"id": "multiPromptChain_0-input-promptRetriever-PromptRetriever" "id": "multiPromptChain_0-input-promptRetriever-PromptRetriever"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "multiPromptChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"promptRetriever": [ "promptRetriever": [
"{{promptRetriever_0.data.instance}}", "{{promptRetriever_0.data.instance}}",

View File

@ -82,7 +82,7 @@
"data": { "data": {
"id": "multiRetrievalQAChain_0", "id": "multiRetrievalQAChain_0",
"label": "Multi Retrieval QA Chain", "label": "Multi Retrieval QA Chain",
"version": 1, "version": 2,
"name": "multiRetrievalQAChain", "name": "multiRetrievalQAChain",
"type": "MultiRetrievalQAChain", "type": "MultiRetrievalQAChain",
"baseClasses": ["MultiRetrievalQAChain", "MultiRouteChain", "BaseChain", "BaseLangChain"], "baseClasses": ["MultiRetrievalQAChain", "MultiRouteChain", "BaseChain", "BaseLangChain"],
@ -109,9 +109,19 @@
"type": "VectorStoreRetriever", "type": "VectorStoreRetriever",
"list": true, "list": true,
"id": "multiRetrievalQAChain_0-input-vectorStoreRetriever-VectorStoreRetriever" "id": "multiRetrievalQAChain_0-input-vectorStoreRetriever-VectorStoreRetriever"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "multiRetrievalQAChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": [ "vectorStoreRetriever": [
"{{vectorStoreRetriever_0.data.instance}}", "{{vectorStoreRetriever_0.data.instance}}",

View File

@ -163,7 +163,7 @@
"data": { "data": {
"id": "retrievalQAChain_0", "id": "retrievalQAChain_0",
"label": "Retrieval QA Chain", "label": "Retrieval QA Chain",
"version": 1, "version": 2,
"name": "retrievalQAChain", "name": "retrievalQAChain",
"type": "RetrievalQAChain", "type": "RetrievalQAChain",
"baseClasses": ["RetrievalQAChain", "BaseChain", "BaseLangChain"], "baseClasses": ["RetrievalQAChain", "BaseChain", "BaseLangChain"],
@ -182,9 +182,19 @@
"name": "vectorStoreRetriever", "name": "vectorStoreRetriever",
"type": "BaseRetriever", "type": "BaseRetriever",
"id": "retrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever" "id": "retrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "retrievalQAChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{redis_0.data.instance}}" "vectorStoreRetriever": "{{redis_0.data.instance}}"
}, },
@ -218,7 +228,7 @@
"data": { "data": {
"id": "retrievalQAChain_1", "id": "retrievalQAChain_1",
"label": "Retrieval QA Chain", "label": "Retrieval QA Chain",
"version": 1, "version": 2,
"name": "retrievalQAChain", "name": "retrievalQAChain",
"type": "RetrievalQAChain", "type": "RetrievalQAChain",
"baseClasses": ["RetrievalQAChain", "BaseChain", "BaseLangChain"], "baseClasses": ["RetrievalQAChain", "BaseChain", "BaseLangChain"],
@ -237,9 +247,19 @@
"name": "vectorStoreRetriever", "name": "vectorStoreRetriever",
"type": "BaseRetriever", "type": "BaseRetriever",
"id": "retrievalQAChain_1-input-vectorStoreRetriever-BaseRetriever" "id": "retrievalQAChain_1-input-vectorStoreRetriever-BaseRetriever"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "retrievalQAChain_1-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_1.data.instance}}", "model": "{{chatOpenAI_1.data.instance}}",
"vectorStoreRetriever": "{{faiss_0.data.instance}}" "vectorStoreRetriever": "{{faiss_0.data.instance}}"
}, },
@ -1741,7 +1761,7 @@
"data": { "data": {
"id": "conversationalAgent_0", "id": "conversationalAgent_0",
"label": "Conversational Agent", "label": "Conversational Agent",
"version": 2, "version": 3,
"name": "conversationalAgent", "name": "conversationalAgent",
"type": "AgentExecutor", "type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
@ -1778,9 +1798,19 @@
"name": "memory", "name": "memory",
"type": "BaseChatMemory", "type": "BaseChatMemory",
"id": "conversationalAgent_0-input-memory-BaseChatMemory" "id": "conversationalAgent_0-input-memory-BaseChatMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalAgent_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"tools": ["{{chainTool_2.data.instance}}", "{{chainTool_3.data.instance}}"], "tools": ["{{chainTool_2.data.instance}}", "{{chainTool_3.data.instance}}"],
"model": "{{chatOpenAI_2.data.instance}}", "model": "{{chatOpenAI_2.data.instance}}",
"memory": "{{bufferMemory_0.data.instance}}", "memory": "{{bufferMemory_0.data.instance}}",

View File

@ -208,7 +208,7 @@
"id": "openAIFunctionAgent_0", "id": "openAIFunctionAgent_0",
"label": "OpenAI Function Agent", "label": "OpenAI Function Agent",
"name": "openAIFunctionAgent", "name": "openAIFunctionAgent",
"version": 3, "version": 4,
"type": "AgentExecutor", "type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain"], "baseClasses": ["AgentExecutor", "BaseChain"],
"category": "Agents", "category": "Agents",
@ -243,9 +243,19 @@
"name": "model", "name": "model",
"type": "BaseChatModel", "type": "BaseChatModel",
"id": "openAIFunctionAgent_0-input-model-BaseChatModel" "id": "openAIFunctionAgent_0-input-model-BaseChatModel"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "openAIFunctionAgent_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"tools": ["{{calculator_0.data.instance}}", "{{serper_0.data.instance}}", "{{customTool_0.data.instance}}"], "tools": ["{{calculator_0.data.instance}}", "{{serper_0.data.instance}}", "{{customTool_0.data.instance}}"],
"memory": "{{bufferMemory_0.data.instance}}", "memory": "{{bufferMemory_0.data.instance}}",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",

View File

@ -52,7 +52,7 @@
"data": { "data": {
"id": "mrklAgentChat_0", "id": "mrklAgentChat_0",
"label": "ReAct Agent for Chat Models", "label": "ReAct Agent for Chat Models",
"version": 3, "version": 4,
"name": "mrklAgentChat", "name": "mrklAgentChat",
"type": "AgentExecutor", "type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
@ -78,9 +78,19 @@
"name": "memory", "name": "memory",
"type": "BaseChatMemory", "type": "BaseChatMemory",
"id": "mrklAgentChat_0-input-memory-BaseChatMemory" "id": "mrklAgentChat_0-input-memory-BaseChatMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "mrklAgentChat_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"tools": ["{{calculator_1.data.instance}}", "{{serper_0.data.instance}}"], "tools": ["{{calculator_1.data.instance}}", "{{serper_0.data.instance}}"],
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"memory": "{{RedisBackedChatMemory_0.data.instance}}" "memory": "{{RedisBackedChatMemory_0.data.instance}}"

View File

@ -249,7 +249,7 @@
"data": { "data": {
"id": "sqlDatabaseChain_0", "id": "sqlDatabaseChain_0",
"label": "Sql Database Chain", "label": "Sql Database Chain",
"version": 4, "version": 5,
"name": "sqlDatabaseChain", "name": "sqlDatabaseChain",
"type": "SqlDatabaseChain", "type": "SqlDatabaseChain",
"baseClasses": ["SqlDatabaseChain", "BaseChain", "Runnable"], "baseClasses": ["SqlDatabaseChain", "BaseChain", "Runnable"],
@ -347,9 +347,19 @@
"name": "model", "name": "model",
"type": "BaseLanguageModel", "type": "BaseLanguageModel",
"id": "sqlDatabaseChain_0-input-model-BaseLanguageModel" "id": "sqlDatabaseChain_0-input-model-BaseLanguageModel"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "sqlDatabaseChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"database": "sqlite", "database": "sqlite",
"url": "", "url": "",

View File

@ -15,7 +15,7 @@
"data": { "data": {
"id": "vectaraQAChain_0", "id": "vectaraQAChain_0",
"label": "Vectara QA Chain", "label": "Vectara QA Chain",
"version": 1, "version": 2,
"name": "vectaraQAChain", "name": "vectaraQAChain",
"type": "VectaraQAChain", "type": "VectaraQAChain",
"baseClasses": ["VectaraQAChain", "BaseChain", "Runnable"], "baseClasses": ["VectaraQAChain", "BaseChain", "Runnable"],
@ -189,9 +189,19 @@
"name": "vectaraStore", "name": "vectaraStore",
"type": "VectorStore", "type": "VectorStore",
"id": "vectaraQAChain_0-input-vectaraStore-VectorStore" "id": "vectaraQAChain_0-input-vectaraStore-VectorStore"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "vectaraQAChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"vectaraStore": "{{vectara_1.data.instance}}", "vectaraStore": "{{vectara_1.data.instance}}",
"summarizerPromptName": "vectara-experimental-summary-ext-2023-10-23-small", "summarizerPromptName": "vectara-experimental-summary-ext-2023-10-23-small",
"responseLang": "eng", "responseLang": "eng",

View File

@ -702,7 +702,7 @@
"data": { "data": {
"id": "conversationalAgent_0", "id": "conversationalAgent_0",
"label": "Conversational Agent", "label": "Conversational Agent",
"version": 2, "version": 3,
"name": "conversationalAgent", "name": "conversationalAgent",
"type": "AgentExecutor", "type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
@ -739,9 +739,19 @@
"name": "memory", "name": "memory",
"type": "BaseChatMemory", "type": "BaseChatMemory",
"id": "conversationalAgent_0-input-memory-BaseChatMemory" "id": "conversationalAgent_0-input-memory-BaseChatMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalAgent_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"tools": ["{{webBrowser_0.data.instance}}"], "tools": ["{{webBrowser_0.data.instance}}"],
"model": "{{chatOpenAI_1.data.instance}}", "model": "{{chatOpenAI_1.data.instance}}",
"memory": "{{bufferMemory_0.data.instance}}", "memory": "{{bufferMemory_0.data.instance}}",

View File

@ -187,7 +187,7 @@
"data": { "data": {
"id": "conversationalRetrievalQAChain_0", "id": "conversationalRetrievalQAChain_0",
"label": "Conversational Retrieval QA Chain", "label": "Conversational Retrieval QA Chain",
"version": 2, "version": 3,
"name": "conversationalRetrievalQAChain", "name": "conversationalRetrievalQAChain",
"type": "ConversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain",
"baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
@ -246,9 +246,19 @@
"optional": true, "optional": true,
"description": "If left empty, a default BufferMemory will be used", "description": "If left empty, a default BufferMemory will be used",
"id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
} }
], ],
"inputs": { "inputs": {
"inputModeration": "",
"model": "{{chatOpenAI_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}",
"vectorStoreRetriever": "{{pinecone_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}",
"memory": "{{RedisBackedChatMemory_0.data.instance}}", "memory": "{{RedisBackedChatMemory_0.data.instance}}",