diff --git a/packages/components/nodes/agents/AirtableAgent/AirtableAgent.ts b/packages/components/nodes/agents/AirtableAgent/AirtableAgent.ts
index 3113cdfed..34352f6c3 100644
--- a/packages/components/nodes/agents/AirtableAgent/AirtableAgent.ts
+++ b/packages/components/nodes/agents/AirtableAgent/AirtableAgent.ts
@@ -6,6 +6,8 @@ import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class Airtable_Agents implements INode {
     label: string
@@ -22,7 +24,7 @@ class Airtable_Agents implements INode {
     constructor() {
         this.label = 'Airtable Agent'
         this.name = 'airtableAgent'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'airtable.svg'
@@ -71,6 +73,14 @@ class Airtable_Agents implements INode {
                 default: 100,
                 additionalParams: true,
                 description: 'Number of results to return'
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
     }
@@ -80,12 +90,24 @@ class Airtable_Agents implements INode {
         return undefined
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const model = nodeData.inputs?.model as BaseLanguageModel
         const baseId = nodeData.inputs?.baseId as string
         const tableId = nodeData.inputs?.tableId as string
         const returnAll = nodeData.inputs?.returnAll as boolean
         const limit = nodeData.inputs?.limit as string
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Airtable agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
 
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const accessToken = getCredentialParam('accessToken', credentialData, nodeData)
diff --git a/packages/components/nodes/agents/AutoGPT/AutoGPT.ts b/packages/components/nodes/agents/AutoGPT/AutoGPT.ts
index 3689a7ea0..4c1d962c3 100644
--- a/packages/components/nodes/agents/AutoGPT/AutoGPT.ts
+++ b/packages/components/nodes/agents/AutoGPT/AutoGPT.ts
@@ -7,6 +7,8 @@ import { PromptTemplate } from '@langchain/core/prompts'
 import { AutoGPT } from 'langchain/experimental/autogpt'
 import { LLMChain } from 'langchain/chains'
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 type ObjectTool = StructuredTool
 const FINISH_NAME = 'finish'
@@ -25,7 +27,7 @@ class AutoGPT_Agents implements INode {
     constructor() {
         this.label = 'AutoGPT'
         this.name = 'autoGPT'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AutoGPT'
         this.category = 'Agents'
         this.icon = 'autogpt.svg'
@@ -68,6 +70,14 @@ class AutoGPT_Agents implements INode {
                 type: 'number',
                 default: 5,
                 optional: true
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
     }
@@ -92,9 +102,21 @@ class AutoGPT_Agents implements INode {
         return autogpt
     }
 
-    async run(nodeData: INodeData, input: string): Promise<string> {
+    async run(nodeData: INodeData, input: string): Promise<string | object> {
         const executor = nodeData.instance as AutoGPT
         const model = nodeData.inputs?.model as BaseChatModel
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the AutoGPT agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
 
         try {
             let totalAssistantReply = ''
diff --git a/packages/components/nodes/agents/BabyAGI/BabyAGI.ts b/packages/components/nodes/agents/BabyAGI/BabyAGI.ts
index c70cd800f..bfc910b79 100644
--- a/packages/components/nodes/agents/BabyAGI/BabyAGI.ts
+++ b/packages/components/nodes/agents/BabyAGI/BabyAGI.ts
@@ -2,6 +2,8 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
 import { VectorStore } from '@langchain/core/vectorstores'
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { BabyAGI } from './core'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class BabyAGI_Agents implements INode {
     label: string
@@ -17,7 +19,7 @@ class BabyAGI_Agents implements INode {
     constructor() {
         this.label = 'BabyAGI'
         this.name = 'babyAGI'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'BabyAGI'
         this.category = 'Agents'
         this.icon = 'babyagi.svg'
@@ -39,6 +41,14 @@ class BabyAGI_Agents implements INode {
                 name: 'taskLoop',
                 type: 'number',
                 default: 3
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
     }
@@ -53,8 +63,21 @@ class BabyAGI_Agents implements INode {
         return babyAgi
     }
 
-    async run(nodeData: INodeData, input: string): Promise<string> {
+    async run(nodeData: INodeData, input: string): Promise<string | object> {
         const executor = nodeData.instance as BabyAGI
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the BabyAGI agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
+
         const objective = input
 
         const res = await executor.call({ objective })
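For reference, the two helpers imported throughout this change are defined outside the diff. A minimal sketch of the contract they appear to satisfy, judging only from the call sites above (the `checkForViolations` method name and the throw-on-violation behaviour are assumptions, not confirmed by this patch):

    // Moderation.ts (sketch): each moderation node resolves with the (possibly
    // rewritten) input when it is clean, and throws when a violation is found.
    export abstract class Moderation {
        abstract checkForViolations(input: string): Promise<string>
    }

    // Run every configured moderation in order; the first violation aborts the
    // run by throwing, which the callers above catch and turn into a response.
    export const checkInputs = async (inputModerations: Moderation[], input: string): Promise<string> => {
        for (const moderation of inputModerations) {
            input = await moderation.checkForViolations(input)
        }
        return input
    }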
diff --git a/packages/components/nodes/agents/CSVAgent/CSVAgent.ts b/packages/components/nodes/agents/CSVAgent/CSVAgent.ts
index f55981ab4..428b02e23 100644
--- a/packages/components/nodes/agents/CSVAgent/CSVAgent.ts
+++ b/packages/components/nodes/agents/CSVAgent/CSVAgent.ts
@@ -5,6 +5,8 @@ import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
 import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class CSV_Agents implements INode {
     label: string
@@ -20,7 +22,7 @@ class CSV_Agents implements INode {
     constructor() {
         this.label = 'CSV Agent'
         this.name = 'csvAgent'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'CSVagent.svg'
@@ -47,6 +49,14 @@ class CSV_Agents implements INode {
                 optional: true,
                 placeholder:
                     'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.'
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
     }
@@ -56,10 +66,22 @@ class CSV_Agents implements INode {
         return undefined
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const csvFileBase64 = nodeData.inputs?.csvFile as string
         const model = nodeData.inputs?.model as BaseLanguageModel
         const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the CSV agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
 
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
index db6b37c6a..802ee6f83 100644
--- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
+++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
@@ -13,6 +13,8 @@ import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { AgentExecutor } from '../../../src/agents'
 import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
 import { addImagesToMessages } from '../../../src/multiModalUtils'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
@@ -46,7 +48,7 @@ class ConversationalAgent_Agents implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversational Agent'
         this.name = 'conversationalAgent'
-        this.version = 2.0
+        this.version = 3.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'agent.svg'
@@ -77,6 +79,14 @@ class ConversationalAgent_Agents implements INode {
                 default: DEFAULT_PREFIX,
                 optional: true,
                 additionalParams: true
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
         this.sessionId = fields?.sessionId
@@ -86,9 +96,20 @@ class ConversationalAgent_Agents implements INode {
         return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Conversational Agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
 
         const executor = await prepareAgent(
             nodeData,
             options,
diff --git a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
index 36bc6807d..c61c2544f 100644
--- a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
+++ b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
@@ -10,6 +10,8 @@ import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 const defaultMessage = `Do your best to answer the questions.
 Feel free to use any tools available to look up relevant information, only if necessary.`
@@ -28,7 +30,7 @@ class ConversationalRetrievalAgent_Agents implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversational Retrieval Agent'
         this.name = 'conversationalRetrievalAgent'
-        this.version = 3.0
+        this.version = 4.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'agent.svg'
@@ -59,6 +61,14 @@ class ConversationalRetrievalAgent_Agents implements INode {
                 rows: 4,
                 optional: true,
                 additionalParams: true
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
         this.sessionId = fields?.sessionId
@@ -68,8 +78,21 @@ class ConversationalRetrievalAgent_Agents implements INode {
         return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Conversational Retrieval Agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
+
         const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
 
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
diff --git a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts
index 5923d77ea..a2d0f2590 100644
--- a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts
+++ b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts
@@ -12,6 +12,8 @@ import { getBaseClasses } from '../../../src/utils'
 import { createReactAgent } from '../../../src/agents'
 import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
 import { addImagesToMessages } from '../../../src/multiModalUtils'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class MRKLAgentChat_Agents implements INode {
     label: string
@@ -28,7 +30,7 @@ class MRKLAgentChat_Agents implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'ReAct Agent for Chat Models'
        this.name = 'mrklAgentChat'
-        this.version = 3.0
+        this.version = 4.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'agent.svg'
@@ -50,6 +52,14 @@ class MRKLAgentChat_Agents implements INode {
                 label: 'Memory',
                 name: 'memory',
                 type: 'BaseChatMemory'
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
         this.sessionId = fields?.sessionId
@@ -59,10 +69,22 @@ class MRKLAgentChat_Agents implements INode {
         return null
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
         const model = nodeData.inputs?.model as BaseChatModel
         let tools = nodeData.inputs?.tools as Tool[]
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the ReAct Agent for Chat Models
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         tools = flatten(tools)
 
         const prompt = await pull('hwchase17/react-chat')
diff --git a/packages/components/nodes/agents/MRKLAgentLLM/MRKLAgentLLM.ts b/packages/components/nodes/agents/MRKLAgentLLM/MRKLAgentLLM.ts
index 452cf4378..179885e34 100644
--- a/packages/components/nodes/agents/MRKLAgentLLM/MRKLAgentLLM.ts
+++ b/packages/components/nodes/agents/MRKLAgentLLM/MRKLAgentLLM.ts
@@ -8,6 +8,8 @@ import { additionalCallbacks } from '../../../src/handler'
 import { getBaseClasses } from '../../../src/utils'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { createReactAgent } from '../../../src/agents'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class MRKLAgentLLM_Agents implements INode {
     label: string
@@ -23,7 +25,7 @@ class MRKLAgentLLM_Agents implements INode {
     constructor() {
         this.label = 'ReAct Agent for LLMs'
         this.name = 'mrklAgentLLM'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'agent.svg'
@@ -40,6 +42,14 @@ class MRKLAgentLLM_Agents implements INode {
                 label: 'Language Model',
                 name: 'model',
                 type: 'BaseLanguageModel'
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
     }
@@ -48,9 +58,22 @@ class MRKLAgentLLM_Agents implements INode {
         return null
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const model = nodeData.inputs?.model as BaseLanguageModel
         let tools = nodeData.inputs?.tools as Tool[]
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the ReAct Agent for LLMs
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
+
         tools = flatten(tools)
 
         const prompt = await pull('hwchase17/react')
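The same guard now appears verbatim in every agent's run method. If the duplication keeps growing it could be hoisted into a shared helper; a possible refactor sketch (the `moderateInput` helper is hypothetical and not part of this patch):

    import { INodeData } from '../../../src/Interface'
    import { checkInputs, Moderation } from '../../moderation/Moderation'
    import { formatResponse } from '../../outputparsers/OutputParserHelpers'

    // Hypothetical helper: resolves with the moderated input, or with an `abort`
    // payload that the caller should return as-is instead of running the agent.
    export const moderateInput = async (
        nodeData: INodeData,
        input: string
    ): Promise<{ input: string; abort?: string | object }> => {
        const moderations = nodeData.inputs?.inputModeration as Moderation[]
        if (!moderations || moderations.length === 0) return { input }
        try {
            return { input: await checkInputs(moderations, input) }
        } catch (e: any) {
            await new Promise((resolve) => setTimeout(resolve, 500))
            return { input, abort: formatResponse(e.message) }
        }
    }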
diff --git a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
index 0acadca16..2fc1a2293 100644
--- a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
+++ b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
@@ -10,6 +10,8 @@ import { getBaseClasses } from '../../../src/utils'
 import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
+import { Moderation, checkInputs } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class OpenAIFunctionAgent_Agents implements INode {
     label: string
@@ -26,7 +28,7 @@ class OpenAIFunctionAgent_Agents implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'OpenAI Function Agent'
         this.name = 'openAIFunctionAgent'
-        this.version = 3.0
+        this.version = 4.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'function.svg'
@@ -56,6 +58,14 @@ class OpenAIFunctionAgent_Agents implements INode {
                 rows: 4,
                 optional: true,
                 additionalParams: true
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
         this.sessionId = fields?.sessionId
@@ -67,6 +77,19 @@ class OpenAIFunctionAgent_Agents implements INode {
 
     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the OpenAI Function Agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
+
         const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
 
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
diff --git a/packages/components/nodes/agents/XMLAgent/XMLAgent.ts b/packages/components/nodes/agents/XMLAgent/XMLAgent.ts
index 491099478..167214f85 100644
--- a/packages/components/nodes/agents/XMLAgent/XMLAgent.ts
+++ b/packages/components/nodes/agents/XMLAgent/XMLAgent.ts
@@ -11,7 +11,8 @@ import { getBaseClasses } from '../../../src/utils'
 import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { AgentExecutor } from '../../../src/agents'
-//import { AgentExecutor } from "langchain/agents";
+import { Moderation, checkInputs } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 const defaultSystemMessage = `You are a helpful assistant. Help the user answer any questions.
@@ -52,7 +53,7 @@ class XMLAgent_Agents implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'XML Agent'
         this.name = 'xmlAgent'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'XMLAgent'
         this.category = 'Agents'
         this.icon = 'xmlagent.svg'
@@ -83,6 +84,14 @@ class XMLAgent_Agents implements INode {
                 rows: 4,
                 default: defaultSystemMessage,
                 additionalParams: true
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
         this.sessionId = fields?.sessionId
@@ -94,6 +103,18 @@ class XMLAgent_Agents implements INode {
 
     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the XML Agent
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
 
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
diff --git a/packages/components/nodes/chains/ApiChain/OpenAPIChain.ts b/packages/components/nodes/chains/ApiChain/OpenAPIChain.ts
index d922a1862..e5c11eb34 100644
--- a/packages/components/nodes/chains/ApiChain/OpenAPIChain.ts
+++ b/packages/components/nodes/chains/ApiChain/OpenAPIChain.ts
@@ -3,6 +3,8 @@ import { APIChain, createOpenAPIChain } from 'langchain/chains'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
+import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class OpenApiChain_Chains implements INode {
     label: string
@@ -18,7 +20,7 @@ class OpenApiChain_Chains implements INode {
     constructor() {
         this.label = 'OpenAPI Chain'
         this.name = 'openApiChain'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'OpenAPIChain'
         this.icon = 'openapi.svg'
         this.category = 'Chains'
@@ -50,6 +52,14 @@ class OpenApiChain_Chains implements INode {
                 type: 'json',
                 additionalParams: true,
                 optional: true
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
     }
@@ -58,11 +68,21 @@ class OpenApiChain_Chains implements INode {
         return await initChain(nodeData)
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const chain = await initChain(nodeData)
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const callbacks = await additionalCallbacks(nodeData, options)
-
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the OpenAPI chain
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         if (options.socketIO && options.socketIOClientId) {
             const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
             const res = await chain.run(input, [loggerHandler, handler, ...callbacks])
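Unlike the agent nodes above, the chain nodes import `streamResponse` and actually call it, so a connected socket client sees the moderation message as if it were a streamed answer; in the agents the call is left commented out. The helper itself is outside this diff; a plausible shape inferred only from the call sites (the 'start'/'token'/'end' event names are assumptions):

    export const streamResponse = (isStreamingEnabled: any, response: string, socketIO: any, socketIOClientId: string) => {
        if (!isStreamingEnabled) return
        // Replay the rejection message through the same socket events a normal
        // streamed completion would use, so the UI renders it in place.
        socketIO.to(socketIOClientId).emit('start', response)
        socketIO.to(socketIOClientId).emit('token', response)
        socketIO.to(socketIOClientId).emit('end')
    }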
diff --git a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
index 46d739d15..ec45b684f 100644
--- a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
+++ b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
@@ -5,6 +5,8 @@ import { PromptTemplate, ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
 import { Runnable, RunnableSequence, RunnableMap, RunnableBranch, RunnableLambda } from '@langchain/core/runnables'
 import { BaseMessage, HumanMessage, AIMessage } from '@langchain/core/messages'
 import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
+import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 import { StringOutputParser } from '@langchain/core/output_parsers'
 import type { Document } from '@langchain/core/documents'
 import { BufferMemoryInput } from 'langchain/memory'
@@ -36,7 +38,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversational Retrieval QA Chain'
         this.name = 'conversationalRetrievalQAChain'
-        this.version = 2.0
+        this.version = 3.0
         this.type = 'ConversationalRetrievalQAChain'
         this.icon = 'qa.svg'
         this.category = 'Chains'
@@ -87,6 +89,14 @@ class ConversationalRetrievalQAChain_Chains implements INode {
                 additionalParams: true,
                 optional: true,
                 default: RESPONSE_TEMPLATE
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
             /** Deprecated
             {
@@ -163,6 +173,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
         }
 
         let memory: FlowiseMemory | undefined = externalMemory
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
         if (!memory) {
             memory = new BufferMemory({
                 returnMessages: true,
@@ -171,6 +182,16 @@ class ConversationalRetrievalQAChain_Chains implements INode {
             })
         }
 
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Conversational Retrieval QA Chain
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt)
 
         const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? []
diff --git a/packages/components/nodes/chains/MultiPromptChain/MultiPromptChain.ts b/packages/components/nodes/chains/MultiPromptChain/MultiPromptChain.ts
index c4c1d3728..fa91bb205 100644
--- a/packages/components/nodes/chains/MultiPromptChain/MultiPromptChain.ts
+++ b/packages/components/nodes/chains/MultiPromptChain/MultiPromptChain.ts
@@ -3,6 +3,8 @@ import { MultiPromptChain } from 'langchain/chains'
 import { ICommonObject, INode, INodeData, INodeParams, PromptRetriever } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
+import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class MultiPromptChain_Chains implements INode {
     label: string
@@ -18,7 +20,7 @@ class MultiPromptChain_Chains implements INode {
     constructor() {
         this.label = 'Multi Prompt Chain'
         this.name = 'multiPromptChain'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'MultiPromptChain'
         this.icon = 'prompt.svg'
         this.category = 'Chains'
@@ -35,6 +37,14 @@ class MultiPromptChain_Chains implements INode {
                 name: 'promptRetriever',
                 type: 'PromptRetriever',
                 list: true
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
     }
@@ -62,8 +72,19 @@ class MultiPromptChain_Chains implements INode {
         return chain
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const chain = nodeData.instance as MultiPromptChain
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Multi Prompt Chain
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         const obj = { input }
 
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
diff --git a/packages/components/nodes/chains/MultiRetrievalQAChain/MultiRetrievalQAChain.ts b/packages/components/nodes/chains/MultiRetrievalQAChain/MultiRetrievalQAChain.ts
index 3cb78ce87..71302d635 100644
--- a/packages/components/nodes/chains/MultiRetrievalQAChain/MultiRetrievalQAChain.ts
+++ b/packages/components/nodes/chains/MultiRetrievalQAChain/MultiRetrievalQAChain.ts
@@ -3,6 +3,8 @@ import { MultiRetrievalQAChain } from 'langchain/chains'
 import { ICommonObject, INode, INodeData, INodeParams, VectorStoreRetriever } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
+import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class MultiRetrievalQAChain_Chains implements INode {
     label: string
@@ -18,7 +20,7 @@ class MultiRetrievalQAChain_Chains implements INode {
     constructor() {
         this.label = 'Multi Retrieval QA Chain'
         this.name = 'multiRetrievalQAChain'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'MultiRetrievalQAChain'
         this.icon = 'qa.svg'
         this.category = 'Chains'
@@ -41,6 +43,14 @@ class MultiRetrievalQAChain_Chains implements INode {
                 name: 'returnSourceDocuments',
                 type: 'boolean',
                 optional: true
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
     }
@@ -72,7 +82,17 @@ class MultiRetrievalQAChain_Chains implements INode {
     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const chain = nodeData.instance as MultiRetrievalQAChain
         const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
-
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Multi Retrieval QA Chain
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         const obj = { input }
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const callbacks = await additionalCallbacks(nodeData, options)
diff --git a/packages/components/nodes/chains/RetrievalQAChain/RetrievalQAChain.ts b/packages/components/nodes/chains/RetrievalQAChain/RetrievalQAChain.ts
index 3968d3c0e..9125f38fc 100644
--- a/packages/components/nodes/chains/RetrievalQAChain/RetrievalQAChain.ts
+++ b/packages/components/nodes/chains/RetrievalQAChain/RetrievalQAChain.ts
@@ -4,6 +4,8 @@ import { RetrievalQAChain } from 'langchain/chains'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
+import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class RetrievalQAChain_Chains implements INode {
     label: string
@@ -19,7 +21,7 @@ class RetrievalQAChain_Chains implements INode {
     constructor() {
         this.label = 'Retrieval QA Chain'
         this.name = 'retrievalQAChain'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'RetrievalQAChain'
         this.icon = 'qa.svg'
         this.category = 'Chains'
@@ -35,6 +37,14 @@ class RetrievalQAChain_Chains implements INode {
                 label: 'Vector Store Retriever',
                 name: 'vectorStoreRetriever',
                 type: 'BaseRetriever'
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
     }
@@ -47,8 +57,19 @@ class RetrievalQAChain_Chains implements INode {
         return chain
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const chain = nodeData.instance as RetrievalQAChain
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Retrieval QA Chain
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         const obj = {
             query: input
         }
diff --git a/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts b/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts
index 2c9f38134..018e1f062 100644
--- a/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts
+++ b/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts
@@ -7,6 +7,8 @@ import { SqlDatabase } from 'langchain/sql_db'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { getBaseClasses, getInputVariables } from '../../../src/utils'
+import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 type DatabaseType = 'sqlite' | 'postgres' | 'mssql' | 'mysql'
 
@@ -24,7 +26,7 @@ class SqlDatabaseChain_Chains implements INode {
     constructor() {
         this.label = 'Sql Database Chain'
         this.name = 'sqlDatabaseChain'
-        this.version = 4.0
+        this.version = 5.0
         this.type = 'SqlDatabaseChain'
         this.icon = 'sqlchain.svg'
         this.category = 'Chains'
@@ -115,6 +117,14 @@ class SqlDatabaseChain_Chains implements INode {
             placeholder: DEFAULT_SQL_DATABASE_PROMPT.template + DEFAULT_SQL_DATABASE_PROMPT.templateFormat,
             additionalParams: true,
             optional: true
+        },
+        {
+            label: 'Input Moderation',
+            description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+            name: 'inputModeration',
+            type: 'Moderation',
+            optional: true,
+            list: true
         }
     ]
 }
@@ -144,7 +154,7 @@ class SqlDatabaseChain_Chains implements INode {
         return chain
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const databaseType = nodeData.inputs?.database as DatabaseType
         const model = nodeData.inputs?.model as BaseLanguageModel
         const url = nodeData.inputs?.url as string
@@ -155,6 +165,17 @@ class SqlDatabaseChain_Chains implements INode {
         const sampleRowsInTableInfo = nodeData.inputs?.sampleRowsInTableInfo as number
         const topK = nodeData.inputs?.topK as number
         const customPrompt = nodeData.inputs?.customPrompt as string
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Sql Database Chain
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
 
         const chain = await getSQLDBChain(
             databaseType,
diff --git a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
index 97bbaa67c..e5427ca0f 100644
--- a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
+++ b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
@@ -4,6 +4,8 @@ import { VectaraStore } from '@langchain/community/vectorstores/vectara'
 import { VectorDBQAChain } from 'langchain/chains'
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 // functionality based on https://github.com/vectara/vectara-answer
 const reorderCitations = (unorderedSummary: string) => {
@@ -48,7 +50,7 @@ class VectaraChain_Chains implements INode {
     constructor() {
         this.label = 'Vectara QA Chain'
         this.name = 'vectaraQAChain'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'VectaraQAChain'
         this.icon = 'vectara.png'
         this.category = 'Chains'
@@ -219,6 +221,14 @@ class VectaraChain_Chains implements INode {
                 description: 'Maximum results used to build the summarized response',
                 type: 'number',
                 default: 7
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
     }
@@ -227,7 +237,7 @@ class VectaraChain_Chains implements INode {
         return null
     }
 
-    async run(nodeData: INodeData, input: string): Promise<string> {
+    async run(nodeData: INodeData, input: string): Promise<string | object> {
         const vectorStore = nodeData.inputs?.vectaraStore as VectaraStore
         const responseLang = (nodeData.inputs?.responseLang as string) ?? 'eng'
         const summarizerPromptName = nodeData.inputs?.summarizerPromptName as string
@@ -252,6 +262,18 @@ class VectaraChain_Chains implements INode {
         const mmrRerankerId = 272725718
         const mmrEnabled = vectaraFilter?.mmrConfig?.enabled
 
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the Vectara chain
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
+
         const data = {
             query: [
                 {
diff --git a/packages/components/nodes/chains/VectorDBQAChain/VectorDBQAChain.ts b/packages/components/nodes/chains/VectorDBQAChain/VectorDBQAChain.ts
index ef0df01a2..129eb46ac 100644
--- a/packages/components/nodes/chains/VectorDBQAChain/VectorDBQAChain.ts
+++ b/packages/components/nodes/chains/VectorDBQAChain/VectorDBQAChain.ts
@@ -4,6 +4,8 @@ import { VectorDBQAChain } from 'langchain/chains'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
+import { checkInputs, Moderation } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class VectorDBQAChain_Chains implements INode {
     label: string
@@ -19,7 +21,7 @@ class VectorDBQAChain_Chains implements INode {
     constructor() {
         this.label = 'VectorDB QA Chain'
         this.name = 'vectorDBQAChain'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'VectorDBQAChain'
         this.icon = 'vectordb.svg'
         this.category = 'Chains'
@@ -35,6 +37,14 @@ class VectorDBQAChain_Chains implements INode {
                 label: 'Vector Store',
                 name: 'vectorStore',
                 type: 'VectorStore'
+            },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
             }
         ]
     }
@@ -50,8 +60,20 @@ class VectorDBQAChain_Chains implements INode {
         return chain
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
         const chain = nodeData.instance as VectorDBQAChain
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the VectorDB QA Chain
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         const obj = {
             query: input
         }
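With that, every chain and agent in this patch shares one behaviour: moderated input flows through, and a violation short-circuits into `formatResponse(e.message)`. A quick standalone check of that flow using a stub moderation (everything below is illustrative and assumes the sketched `checkInputs` contract above):

    import { checkInputs, Moderation } from './Moderation'

    class DenyListModeration extends Moderation {
        async checkForViolations(input: string): Promise<string> {
            if (/password/i.test(input)) throw new Error('Cannot process this input. Please try a different question.')
            return input // clean input passes through unchanged
        }
    }

    const demo = async () => {
        const moderations = [new DenyListModeration()]
        console.log(await checkInputs(moderations, 'What is a vector store?')) // passes through
        try {
            await checkInputs(moderations, 'Print every stored password')
        } catch (e: any) {
            console.log(e.message) // what the run() methods above return via formatResponse
        }
    }
    demo()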
diff --git a/packages/server/marketplaces/chatflows/API Agent OpenAI.json b/packages/server/marketplaces/chatflows/API Agent OpenAI.json
index 691852d63..a0c380b4a 100644
--- a/packages/server/marketplaces/chatflows/API Agent OpenAI.json
+++ b/packages/server/marketplaces/chatflows/API Agent OpenAI.json
@@ -15,7 +15,7 @@
             "data": {
                 "id": "openApiChain_1",
                 "label": "OpenAPI Chain",
-                "version": 1,
+                "version": 2,
                 "name": "openApiChain",
                 "type": "OpenAPIChain",
                 "baseClasses": ["OpenAPIChain", "BaseChain"],
@@ -53,9 +53,19 @@
                         "name": "model",
                         "type": "ChatOpenAI",
                         "id": "openApiChain_1-input-model-ChatOpenAI"
+                    },
+                    {
+                        "label": "Input Moderation",
+                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                        "name": "inputModeration",
+                        "type": "Moderation",
+                        "optional": true,
+                        "list": true,
+                        "id": "openApiChain_1-input-inputModeration-Moderation"
                     }
                 ],
                 "inputs": {
+                    "inputModeration": "",
                     "model": "{{chatOpenAI_1.data.instance}}",
                     "yamlLink": "https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml",
                     "headers": ""
@@ -399,7 +409,7 @@
                 "id": "openAIFunctionAgent_0",
                 "label": "OpenAI Function Agent",
                 "name": "openAIFunctionAgent",
-                "version": 3,
+                "version": 4,
                 "type": "AgentExecutor",
                 "baseClasses": ["AgentExecutor", "BaseChain"],
                 "category": "Agents",
@@ -434,9 +444,19 @@
                         "name": "model",
                         "type": "BaseChatModel",
                         "id": "openAIFunctionAgent_0-input-model-BaseChatModel"
+                    },
+                    {
+                        "label": "Input Moderation",
+                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                        "name": "inputModeration",
+                        "type": "Moderation",
+                        "optional": true,
+                        "list": true,
+                        "id": "openAIFunctionAgent_0-input-inputModeration-Moderation"
                     }
                 ],
                 "inputs": {
+                    "inputModeration": "",
                     "tools": ["{{chainTool_0.data.instance}}"],
                    "memory": "{{bufferMemory_0.data.instance}}",
                     "model": "{{chatOpenAI_2.data.instance}}",
diff --git a/packages/server/marketplaces/chatflows/API Agent.json b/packages/server/marketplaces/chatflows/API Agent.json
index facdcb6b8..7f3f90b46 100644
--- a/packages/server/marketplaces/chatflows/API Agent.json
+++ b/packages/server/marketplaces/chatflows/API Agent.json
@@ -1100,7 +1100,7 @@
             "data": {
                 "id": "conversationalAgent_0",
                 "label": "Conversational Agent",
-                "version": 2,
+                "version": 3,
                 "name": "conversationalAgent",
                 "type": "AgentExecutor",
                 "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
@@ -1137,9 +1137,19 @@
                         "name": "memory",
                         "type": "BaseChatMemory",
                         "id": "conversationalAgent_0-input-memory-BaseChatMemory"
+                    },
+                    {
+                        "label": "Input Moderation",
+                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                        "name": "inputModeration",
+                        "type": "Moderation",
+                        "optional": true,
+                        "list": true,
+                        "id": "conversationalAgent_0-input-inputModeration-Moderation"
                     }
                 ],
                 "inputs": {
+                    "inputModeration": "",
                     "tools": ["{{chainTool_0.data.instance}}", "{{chainTool_1.data.instance}}"],
                     "model": "{{chatOpenAI_3.data.instance}}",
                     "memory": "{{bufferMemory_0.data.instance}}",
diff --git a/packages/server/marketplaces/chatflows/AutoGPT.json b/packages/server/marketplaces/chatflows/AutoGPT.json
index bb7c7bdc9..fd6ffc904 100644
--- a/packages/server/marketplaces/chatflows/AutoGPT.json
+++ b/packages/server/marketplaces/chatflows/AutoGPT.json
@@ -15,7 +15,7 @@
             "data": {
                 "id": "autoGPT_0",
                 "label": "AutoGPT",
-                "version": 1,
+                "version": 2,
                 "name": "autoGPT",
                 "type": "AutoGPT",
                 "baseClasses": ["AutoGPT"],
@@ -66,9 +66,19 @@
                         "name": "vectorStoreRetriever",
                         "type": "BaseRetriever",
                         "id": "autoGPT_0-input-vectorStoreRetriever-BaseRetriever"
+                    },
+                    {
+                        "label": "Input Moderation",
+                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                        "name": "inputModeration",
+                        "type": "Moderation",
+                        "optional": true,
+                        "list": true,
+                        "id": "autoGPT_0-input-inputModeration-Moderation"
                     }
                 ],
                 "inputs": {
+                    "inputModeration": "",
                     "tools": ["{{readFile_0.data.instance}}", "{{writeFile_1.data.instance}}", "{{serpAPI_0.data.instance}}"],
                     "model": "{{chatOpenAI_0.data.instance}}",
                     "vectorStoreRetriever": "{{pinecone_0.data.instance}}",
diff --git a/packages/server/marketplaces/chatflows/BabyAGI.json b/packages/server/marketplaces/chatflows/BabyAGI.json
index 8a8000468..a93e71934 100644
--- a/packages/server/marketplaces/chatflows/BabyAGI.json
+++ b/packages/server/marketplaces/chatflows/BabyAGI.json
@@ -15,7 +15,7 @@
             "data": {
                 "id": "babyAGI_1",
                 "label": "BabyAGI",
-                "version": 1,
+                "version": 2,
                 "name": "babyAGI",
                 "type": "BabyAGI",
                 "baseClasses": ["BabyAGI"],
@@ -42,9 +42,19 @@
                         "name": "vectorStore",
                         "type": "VectorStore",
                         "id": "babyAGI_1-input-vectorStore-VectorStore"
+                    },
+                    {
+                        "label": "Input Moderation",
+                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                        "name": "inputModeration",
+                        "type": "Moderation",
+                        "optional": true,
+                        "list": true,
+                        "id": "babyAGI_1-input-inputModeration-Moderation"
                     }
                 ],
                 "inputs": {
+                    "inputModeration": "",
                     "model": "{{chatOpenAI_0.data.instance}}",
                     "vectorStore": "{{pinecone_0.data.instance}}",
                     "taskLoop": 3
diff --git a/packages/server/marketplaces/chatflows/CSV Agent.json b/packages/server/marketplaces/chatflows/CSV Agent.json
index 0a0bdce94..92af97352 100644
--- a/packages/server/marketplaces/chatflows/CSV Agent.json
+++ b/packages/server/marketplaces/chatflows/CSV Agent.json
@@ -16,7 +16,7 @@
                 "id": "csvAgent_0",
                 "label": "CSV Agent",
                 "name": "csvAgent",
-                "version": 1,
+                "version": 2,
                 "type": "AgentExecutor",
                 "baseClasses": ["AgentExecutor", "BaseChain"],
                 "category": "Agents",
@@ -36,9 +36,19 @@
                         "name": "model",
                         "type": "BaseLanguageModel",
                         "id": "csvAgent_0-input-model-BaseLanguageModel"
+                    },
+                    {
+                        "label": "Input Moderation",
+                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                        "name": "inputModeration",
+                        "type": "Moderation",
+                        "optional": true,
+                        "list": true,
+                        "id": "csvAgent_0-input-inputModeration-Moderation"
                     }
                 ],
                 "inputs": {
+                    "inputModeration": "",
                     "model": "{{chatOpenAI_0.data.instance}}"
                 },
                 "outputAnchors": [
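Each marketplace template gains the same optional anchor plus an empty `"inputModeration": ""` default, so existing flows keep working until a moderation node is attached. Wired up, the entry would follow the same `{{node.data.instance}}` convention as the other inputs; for example (the `inputModerationOpenAI_0` node id is hypothetical):

    "inputs": {
        "inputModeration": ["{{inputModerationOpenAI_0.data.instance}}"],
        "model": "{{chatOpenAI_0.data.instance}}"
    }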
}, "outputAnchors": [ diff --git a/packages/server/marketplaces/chatflows/Chat with a Podcast.json b/packages/server/marketplaces/chatflows/Chat with a Podcast.json index 6d0344a34..526a85f60 100644 --- a/packages/server/marketplaces/chatflows/Chat with a Podcast.json +++ b/packages/server/marketplaces/chatflows/Chat with a Podcast.json @@ -15,7 +15,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -74,9 +74,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}", "memory": "", diff --git a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json index cbdc46341..e7f27080f 100644 --- a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json +++ b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json @@ -451,7 +451,7 @@ "id": "mrklAgentChat_0", "label": "MRKL Agent for Chat Models", "name": "mrklAgentChat", - "version": 1, + "version": 2, "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain"], "category": "Agents", @@ -470,9 +470,19 @@ "name": "model", "type": "BaseLanguageModel", "id": "mrklAgentChat_0-input-model-BaseLanguageModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "mrklAgentChat_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{requestsGet_0.data.instance}}", "{{requestsPost_0.data.instance}}", "{{aiPlugin_0.data.instance}}"], "model": "{{chatOpenAI_0.data.instance}}" }, diff --git a/packages/server/marketplaces/chatflows/Conversational Agent.json b/packages/server/marketplaces/chatflows/Conversational Agent.json index d07047d69..3d9340de8 100644 --- a/packages/server/marketplaces/chatflows/Conversational Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Agent.json @@ -392,7 +392,7 @@ "data": { "id": "conversationalAgent_0", "label": "Conversational Agent", - "version": 2, + "version": 3, "name": "conversationalAgent", "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], @@ -429,9 +429,19 @@ "name": "memory", "type": "BaseChatMemory", "id": "conversationalAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{calculator_1.data.instance}}", 
"{{serpAPI_0.data.instance}}"], "model": "{{chatOpenAI_0.data.instance}}", "memory": "{{bufferMemory_1.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json index 72ac467ec..dbcca2366 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json @@ -123,7 +123,7 @@ "data": { "id": "conversationalRetrievalAgent_0", "label": "Conversational Retrieval Agent", - "version": 3, + "version": 4, "name": "conversationalRetrievalAgent", "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], @@ -159,9 +159,19 @@ "name": "model", "type": "BaseChatModel", "id": "conversationalRetrievalAgent_0-input-model-BaseChatModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{retrieverTool_0.data.instance}}"], "memory": "{{bufferMemory_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json index df3d13896..0b6559ff1 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json @@ -274,7 +274,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -333,9 +333,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}", "memory": "", diff --git a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json index 62c72595e..c3db081b4 100644 --- a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json +++ b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json @@ -158,7 +158,7 @@ "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", "name": "conversationalRetrievalQAChain", - "version": 2, + "version": 3, "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "category": "Chains", @@ -216,9 +216,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input 
Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}", "memory": "", diff --git a/packages/server/marketplaces/chatflows/Local QnA.json b/packages/server/marketplaces/chatflows/Local QnA.json index 3e8b93f66..6f4ed8ce4 100644 --- a/packages/server/marketplaces/chatflows/Local QnA.json +++ b/packages/server/marketplaces/chatflows/Local QnA.json @@ -85,7 +85,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -144,9 +144,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOllama_0.data.instance}}", "vectorStoreRetriever": "{{faiss_0.data.instance}}", "memory": "", diff --git a/packages/server/marketplaces/chatflows/Long Term Memory.json b/packages/server/marketplaces/chatflows/Long Term Memory.json index bc3b8a767..f6a642cb6 100644 --- a/packages/server/marketplaces/chatflows/Long Term Memory.json +++ b/packages/server/marketplaces/chatflows/Long Term Memory.json @@ -15,7 +15,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -74,9 +74,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{qdrant_0.data.instance}}", "memory": "{{ZepMemory_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/Metadata Filter.json b/packages/server/marketplaces/chatflows/Metadata Filter.json index 147a8cf63..2caefe5d4 100644 --- a/packages/server/marketplaces/chatflows/Metadata Filter.json +++ b/packages/server/marketplaces/chatflows/Metadata Filter.json @@ -251,7 +251,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": 
["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -310,9 +310,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}", "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", diff --git a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json index 41cd9b175..171cce0ba 100644 --- a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json @@ -84,7 +84,7 @@ "id": "multiPromptChain_0", "label": "Multi Prompt Chain", "name": "multiPromptChain", - "version": 1, + "version": 2, "type": "MultiPromptChain", "baseClasses": ["MultiPromptChain", "MultiRouteChain", "BaseChain", "BaseLangChain"], "category": "Chains", @@ -103,9 +103,19 @@ "type": "PromptRetriever", "list": true, "id": "multiPromptChain_0-input-promptRetriever-PromptRetriever" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "multiPromptChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "promptRetriever": [ "{{promptRetriever_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json index 8f762ca91..70ecbcf83 100644 --- a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json @@ -82,7 +82,7 @@ "data": { "id": "multiRetrievalQAChain_0", "label": "Multi Retrieval QA Chain", - "version": 1, + "version": 2, "name": "multiRetrievalQAChain", "type": "MultiRetrievalQAChain", "baseClasses": ["MultiRetrievalQAChain", "MultiRouteChain", "BaseChain", "BaseLangChain"], @@ -109,9 +109,19 @@ "type": "VectorStoreRetriever", "list": true, "id": "multiRetrievalQAChain_0-input-vectorStoreRetriever-VectorStoreRetriever" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "multiRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": [ "{{vectorStoreRetriever_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/Multiple VectorDB.json b/packages/server/marketplaces/chatflows/Multiple VectorDB.json index db17df542..3de2a08ce 100644 --- 
a/packages/server/marketplaces/chatflows/Multiple VectorDB.json +++ b/packages/server/marketplaces/chatflows/Multiple VectorDB.json @@ -163,7 +163,7 @@ "data": { "id": "retrievalQAChain_0", "label": "Retrieval QA Chain", - "version": 1, + "version": 2, "name": "retrievalQAChain", "type": "RetrievalQAChain", "baseClasses": ["RetrievalQAChain", "BaseChain", "BaseLangChain"], @@ -182,9 +182,19 @@ "name": "vectorStoreRetriever", "type": "BaseRetriever", "id": "retrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "retrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{redis_0.data.instance}}" }, @@ -218,7 +228,7 @@ "data": { "id": "retrievalQAChain_1", "label": "Retrieval QA Chain", - "version": 1, + "version": 2, "name": "retrievalQAChain", "type": "RetrievalQAChain", "baseClasses": ["RetrievalQAChain", "BaseChain", "BaseLangChain"], @@ -237,9 +247,19 @@ "name": "vectorStoreRetriever", "type": "BaseRetriever", "id": "retrievalQAChain_1-input-vectorStoreRetriever-BaseRetriever" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "retrievalQAChain_1-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_1.data.instance}}", "vectorStoreRetriever": "{{faiss_0.data.instance}}" }, @@ -1741,7 +1761,7 @@ "data": { "id": "conversationalAgent_0", "label": "Conversational Agent", - "version": 2, + "version": 3, "name": "conversationalAgent", "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], @@ -1778,9 +1798,19 @@ "name": "memory", "type": "BaseChatMemory", "id": "conversationalAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{chainTool_2.data.instance}}", "{{chainTool_3.data.instance}}"], "model": "{{chatOpenAI_2.data.instance}}", "memory": "{{bufferMemory_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/OpenAI Agent.json b/packages/server/marketplaces/chatflows/OpenAI Agent.json index f405640c7..065b0b4d3 100644 --- a/packages/server/marketplaces/chatflows/OpenAI Agent.json +++ b/packages/server/marketplaces/chatflows/OpenAI Agent.json @@ -208,7 +208,7 @@ "id": "openAIFunctionAgent_0", "label": "OpenAI Function Agent", "name": "openAIFunctionAgent", - "version": 3, + "version": 4, "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain"], "category": "Agents", @@ -243,9 +243,19 @@ "name": "model", "type": "BaseChatModel", "id": "openAIFunctionAgent_0-input-model-BaseChatModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": 
"Moderation", + "optional": true, + "list": true, + "id": "openAIFunctionAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{calculator_0.data.instance}}", "{{serper_0.data.instance}}", "{{customTool_0.data.instance}}"], "memory": "{{bufferMemory_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/ReAct Agent.json b/packages/server/marketplaces/chatflows/ReAct Agent.json index a4989c473..7bc2c33fd 100644 --- a/packages/server/marketplaces/chatflows/ReAct Agent.json +++ b/packages/server/marketplaces/chatflows/ReAct Agent.json @@ -52,7 +52,7 @@ "data": { "id": "mrklAgentChat_0", "label": "ReAct Agent for Chat Models", - "version": 3, + "version": 4, "name": "mrklAgentChat", "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], @@ -78,9 +78,19 @@ "name": "memory", "type": "BaseChatMemory", "id": "mrklAgentChat_0-input-memory-BaseChatMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "mrklAgentChat_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{calculator_1.data.instance}}", "{{serper_0.data.instance}}"], "model": "{{chatOpenAI_0.data.instance}}", "memory": "{{RedisBackedChatMemory_0.data.instance}}" diff --git a/packages/server/marketplaces/chatflows/SQL DB Chain.json b/packages/server/marketplaces/chatflows/SQL DB Chain.json index debe4edce..855f9b1f4 100644 --- a/packages/server/marketplaces/chatflows/SQL DB Chain.json +++ b/packages/server/marketplaces/chatflows/SQL DB Chain.json @@ -249,7 +249,7 @@ "data": { "id": "sqlDatabaseChain_0", "label": "Sql Database Chain", - "version": 4, + "version": 5, "name": "sqlDatabaseChain", "type": "SqlDatabaseChain", "baseClasses": ["SqlDatabaseChain", "BaseChain", "Runnable"], @@ -347,9 +347,19 @@ "name": "model", "type": "BaseLanguageModel", "id": "sqlDatabaseChain_0-input-model-BaseLanguageModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "sqlDatabaseChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "database": "sqlite", "url": "", diff --git a/packages/server/marketplaces/chatflows/Vectara RAG Chain.json b/packages/server/marketplaces/chatflows/Vectara RAG Chain.json index 2ef1474a5..c5684ae42 100644 --- a/packages/server/marketplaces/chatflows/Vectara RAG Chain.json +++ b/packages/server/marketplaces/chatflows/Vectara RAG Chain.json @@ -15,7 +15,7 @@ "data": { "id": "vectaraQAChain_0", "label": "Vectara QA Chain", - "version": 1, + "version": 2, "name": "vectaraQAChain", "type": "VectaraQAChain", "baseClasses": ["VectaraQAChain", "BaseChain", "Runnable"], @@ -189,9 +189,19 @@ "name": "vectaraStore", "type": "VectorStore", "id": "vectaraQAChain_0-input-vectaraStore-VectorStore" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": 
"vectaraQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "vectaraStore": "{{vectara_1.data.instance}}", "summarizerPromptName": "vectara-experimental-summary-ext-2023-10-23-small", "responseLang": "eng", diff --git a/packages/server/marketplaces/chatflows/WebBrowser.json b/packages/server/marketplaces/chatflows/WebBrowser.json index d8b7d9f6d..d27298d2f 100644 --- a/packages/server/marketplaces/chatflows/WebBrowser.json +++ b/packages/server/marketplaces/chatflows/WebBrowser.json @@ -702,7 +702,7 @@ "data": { "id": "conversationalAgent_0", "label": "Conversational Agent", - "version": 2, + "version": 3, "name": "conversationalAgent", "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], @@ -739,9 +739,19 @@ "name": "memory", "type": "BaseChatMemory", "id": "conversationalAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{webBrowser_0.data.instance}}"], "model": "{{chatOpenAI_1.data.instance}}", "memory": "{{bufferMemory_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json index 5ca29ee98..a913e5f96 100644 --- a/packages/server/marketplaces/chatflows/WebPage QnA.json +++ b/packages/server/marketplaces/chatflows/WebPage QnA.json @@ -187,7 +187,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -246,9 +246,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}", "memory": "{{RedisBackedChatMemory_0.data.instance}}",