add input moderation to conversation chain

Henry 2024-01-30 17:57:11 +00:00
parent 436b3aae75
commit 214b312fe5
3 changed files with 47 additions and 4 deletions

View File

@@ -7,6 +7,8 @@ import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from
 import { RunnableSequence } from 'langchain/schema/runnable'
 import { StringOutputParser } from 'langchain/schema/output_parser'
 import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
+import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
 const inputKey = 'input'
@@ -26,7 +28,7 @@ class ConversationChain_Chains implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversation Chain'
         this.name = 'conversationChain'
-        this.version = 2.0
+        this.version = 3.0
         this.type = 'ConversationChain'
         this.icon = 'conv.svg'
         this.category = 'Chains'
@@ -60,6 +62,14 @@ class ConversationChain_Chains implements INode {
                 optional: true,
                 list: true
             },*/
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            },
             {
                 label: 'System Message',
                 name: 'systemMessagePrompt',
@@ -80,8 +90,21 @@ class ConversationChain_Chains implements INode {
         return chain
     }
 
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
         const memory = nodeData.inputs?.memory
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+
+        if (moderations && moderations.length > 0) {
+            try {
+                // Use the output of the moderation chain as input for the LLM chain
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
         const chain = prepareChain(nodeData, this.sessionId, options.chatHistory)
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
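
For readers tracing the new control flow in run(): the moderation block acts as a gate in front of the LLM chain. Each configured moderation can transform the input or throw; a throw short-circuits the chain, streams the rejection message to the client (when a socket is attached), and returns it as the node's response instead of an LLM completion. The sketch below illustrates that pattern in isolation. It is a simplified stand-in, not the actual ../../moderation/Moderation module; the Moderation interface, DenyListModeration class, and checkInputsSketch function are hypothetical names used only for this example.

// Illustrative sketch only -- the names below are assumptions for the example,
// not the real ../../moderation/Moderation implementation.
interface Moderation {
    checkForViolations(input: string): Promise<string>
}

// A toy moderation runner that rejects inputs containing blocked terms.
class DenyListModeration implements Moderation {
    constructor(private blockedTerms: string[]) {}

    async checkForViolations(input: string): Promise<string> {
        const lowered = input.toLowerCase()
        if (this.blockedTerms.some((term) => lowered.includes(term))) {
            throw new Error('Cannot process: input was flagged by moderation')
        }
        // A passing (possibly sanitized) input is handed to the next check.
        return input
    }
}

// Conceptual equivalent of checkInputs: run every configured moderation in
// sequence, feeding each one's output into the next.
async function checkInputsSketch(moderations: Moderation[], input: string): Promise<string> {
    for (const moderation of moderations) {
        input = await moderation.checkForViolations(input)
    }
    return input
}

// Usage: either a safe input reaches the chain, or the error message is
// returned to the user in its place.
async function demo() {
    const moderations: Moderation[] = [new DenyListModeration(['forbidden topic'])]
    try {
        const safeInput = await checkInputsSketch(moderations, 'Tell me about the weather')
        console.log('Passed moderation, forwarding to the chain:', safeInput)
    } catch (e: any) {
        console.log('Blocked before reaching the model:', e.message)
    }
}

demo()

The 500 ms setTimeout before streamResponse in the real run() presumably just gives the streaming socket a moment before the rejection message is emitted; the gate itself does not depend on that detail.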

View File

@@ -70,7 +70,7 @@
             "data": {
                 "id": "conversationChain_0",
                 "label": "Conversation Chain",
-                "version": 2,
+                "version": 3,
                 "name": "conversationChain",
                 "type": "ConversationChain",
                 "baseClasses": ["ConversationChain", "LLMChain", "BaseChain", "Runnable"],
@@ -110,9 +110,19 @@
                         "description": "Override existing prompt with Chat Prompt Template. Human Message must includes {input} variable",
                         "optional": true,
                         "id": "conversationChain_0-input-chatPromptTemplate-ChatPromptTemplate"
+                    },
+                    {
+                        "label": "Input Moderation",
+                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                        "name": "inputModeration",
+                        "type": "Moderation",
+                        "optional": true,
+                        "list": true,
+                        "id": "conversationChain_0-input-inputModeration-Moderation"
                     }
                 ],
                 "inputs": {
+                    "inputModeration": "",
                     "model": "{{chatAnthropic_0.data.instance}}",
                     "memory": "{{bufferMemory_0.data.instance}}",
                     "chatPromptTemplate": "{{chatPromptTemplate_0.data.instance}}",

View File

@@ -269,7 +269,7 @@
             "data": {
                 "id": "conversationChain_0",
                 "label": "Conversation Chain",
-                "version": 2,
+                "version": 3,
                 "name": "conversationChain",
                 "type": "ConversationChain",
                 "baseClasses": ["ConversationChain", "LLMChain", "BaseChain", "Runnable"],
@@ -309,9 +309,19 @@
                         "description": "Override existing prompt with Chat Prompt Template. Human Message must includes {input} variable",
                         "optional": true,
                         "id": "conversationChain_0-input-chatPromptTemplate-ChatPromptTemplate"
+                    },
+                    {
+                        "label": "Input Moderation",
+                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                        "name": "inputModeration",
+                        "type": "Moderation",
+                        "optional": true,
+                        "list": true,
+                        "id": "conversationChain_0-input-inputModeration-Moderation"
                     }
                 ],
                 "inputs": {
+                    "inputModeration": "",
                     "model": "{{chatOpenAI_0.data.instance}}",
                     "memory": "{{bufferMemory_0.data.instance}}",
                     "chatPromptTemplate": "",