slight naming changes

Henry 2023-11-22 17:10:18 +00:00
parent 619fb4f5c1
commit c274085d42
5 changed files with 454 additions and 13 deletions


@@ -37,14 +37,6 @@ class LLMChain_Chains implements INode {
name: 'model',
type: 'BaseLanguageModel'
},
- {
- label: 'Input Moderation',
- description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
- name: 'inputModeration',
- type: 'Moderation',
- optional: true,
- list: true
- },
{
label: 'Prompt',
name: 'prompt',
@@ -56,6 +48,14 @@ class LLMChain_Chains implements INode {
type: 'BaseLLMOutputParser',
optional: true
},
+ {
+ label: 'Input Moderation',
+ description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+ name: 'inputModeration',
+ type: 'Moderation',
+ optional: true,
+ list: true
+ },
{
label: 'Chain Name',
name: 'chainName',
@@ -166,6 +166,7 @@ const runPrediction = async (
// Use the output of the moderation chain as input for the LLM chain
input = await checkInputs(moderations, chain.llm, input)
} catch (e) {
+ await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(isStreaming, e.message, socketIO, socketIOClientId)
return formatResponse(e.message)
}
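
The moderation hook above is a simple gate: each connected Moderation runner gets a chance to inspect (and possibly rewrite) the input, and any runner can abort the prediction by throwing; the newly added 500 ms pause before streamResponse presumably gives the client socket time to attach before the error is streamed back. A minimal sketch of that gate, with the Moderation interface taken from the runner diff further down and BaseLanguageModel stubbed as a placeholder rather than the real langchain import:

```ts
// Sketch of the moderation gate used by runPrediction above.
// BaseLanguageModel is a stand-in type here; in Flowise it comes from langchain.
type BaseLanguageModel = unknown

interface Moderation {
    checkForViolations(llm: BaseLanguageModel, input: string): Promise<string>
}

const checkInputs = async (moderations: Moderation[], llm: BaseLanguageModel, input: string): Promise<string> => {
    for (const moderation of moderations) {
        // Each runner returns the (possibly unchanged) input, or throws its configured error message.
        input = await moderation.checkForViolations(llm, input)
    }
    return input
}
```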


@@ -15,7 +15,7 @@ class OpenAIModeration implements INode {
inputs: INodeParams[]
constructor() {
- this.label = 'Moderation - Open AI'
+ this.label = 'OpenAI Moderation'
this.name = 'inputModerationOpenAI'
this.version = 1.0
this.type = 'Moderation'
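
For reference, the kind of check this node performs can be illustrated with a standalone call against OpenAI's public /v1/moderations endpoint. This is only a sketch of the idea, not necessarily how the node is implemented internally; moderateWithOpenAI is a hypothetical helper and the error message mirrors the node's default string:

```ts
// Illustrative only: flag input using OpenAI's moderation endpoint.
const moderateWithOpenAI = async (input: string, apiKey: string): Promise<string> => {
    const res = await fetch('https://api.openai.com/v1/moderations', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` },
        body: JSON.stringify({ input })
    })
    const data = await res.json()
    if (data?.results?.[0]?.flagged) {
        throw new Error("Cannot Process! Input violates OpenAI's content moderation policies.")
    }
    return input
}
```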


@@ -15,13 +15,13 @@ class SimplePromptModeration implements INode {
inputs: INodeParams[]
constructor() {
- this.label = 'Moderation - Simple Prompt'
+ this.label = 'Simple Prompt Moderation'
this.name = 'inputModerationSimple'
this.version = 1.0
this.type = 'Moderation'
this.icon = 'simple_moderation.png'
this.category = 'Responsible AI'
- this.description = 'Detecting and mitigating prompt attacks'
+ this.description = 'Check whether input consists of any text from Deny list, and prevent being sent to LLM'
this.baseClasses = [this.type, ...getBaseClasses(Moderation)]
this.inputs = [
{
@@ -44,7 +44,6 @@ class SimplePromptModeration implements INode {
]
}
- // eslint-disable-next-line unused-imports/no-unused-vars
async init(nodeData: INodeData): Promise<any> {
const denyList = nodeData.inputs?.denyList as string
const moderationErrorMessage = nodeData.inputs?.moderationErrorMessage as string


@@ -13,7 +13,7 @@ export class SimplePromptModerationRunner implements Moderation {
this.moderationErrorMessage = moderationErrorMessage
}
- async checkForViolations(llm: BaseLanguageModel, input: string): Promise<string> {
+ async checkForViolations(_: BaseLanguageModel, input: string): Promise<string> {
this.denyList.split('\n').forEach((denyListItem) => {
if (denyListItem && denyListItem !== '' && input.includes(denyListItem)) {
throw Error(this.moderationErrorMessage)
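
Pieced together, the deny-list runner amounts to roughly the class below. The check logic and the unused-parameter rename follow the hunk above; the constructor and overall class shape are assumptions, so treat this as a sketch rather than the source. Each line of the Deny List input becomes one forbidden substring, so a deny list containing `ignore previous instructions` rejects any prompt that includes that phrase:

```ts
// Sketch of the deny-list runner reconstructed from the diff above.
class SimplePromptModerationRunner {
    denyList = ''
    moderationErrorMessage = ''

    constructor(denyList: string, moderationErrorMessage: string) {
        this.denyList = denyList
        this.moderationErrorMessage = moderationErrorMessage
    }

    // The language model is not needed for a plain substring match, hence the `_` rename above.
    async checkForViolations(_: unknown, input: string): Promise<string> {
        this.denyList.split('\n').forEach((denyListItem) => {
            if (denyListItem && denyListItem !== '' && input.includes(denyListItem)) {
                throw Error(this.moderationErrorMessage)
            }
        })
        return input
    }
}
```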


@@ -0,0 +1,441 @@
{
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"badge": "NEW",
"nodes": [
{
"width": 300,
"height": 356,
"id": "inputModerationOpenAI_0",
"position": {
"x": 334.36040624369247,
"y": 467.88081727992824
},
"type": "customNode",
"data": {
"id": "inputModerationOpenAI_0",
"label": "OpenAI Moderation",
"version": 1,
"name": "inputModerationOpenAI",
"type": "Moderation",
"baseClasses": ["Moderation", "ResponsibleAI"],
"category": "Responsible AI",
"description": "Check whether content complies with OpenAI usage policies.",
"inputParams": [
{
"label": "Error Message",
"name": "moderationErrorMessage",
"type": "string",
"rows": 2,
"default": "Cannot Process! Input violates OpenAI's content moderation policies.",
"optional": true,
"id": "inputModerationOpenAI_0-input-moderationErrorMessage-string"
}
],
"inputAnchors": [],
"inputs": {
"moderationErrorMessage": "Cannot Process! Input violates OpenAI's content moderation policies."
},
"outputAnchors": [
{
"id": "inputModerationOpenAI_0-output-inputModerationOpenAI-Moderation|ResponsibleAI",
"name": "inputModerationOpenAI",
"label": "Moderation",
"type": "Moderation | ResponsibleAI"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 334.36040624369247,
"y": 467.88081727992824
},
"dragging": false
},
{
"width": 300,
"height": 507,
"id": "llmChain_0",
"position": {
"x": 859.216454729136,
"y": 154.86846618352752
},
"type": "customNode",
"data": {
"id": "llmChain_0",
"label": "LLM Chain",
"version": 3,
"name": "llmChain",
"type": "LLMChain",
"baseClasses": ["LLMChain", "BaseChain", "Runnable"],
"category": "Chains",
"description": "Chain to run queries against LLMs",
"inputParams": [
{
"label": "Chain Name",
"name": "chainName",
"type": "string",
"placeholder": "Name Your Chain",
"optional": true,
"id": "llmChain_0-input-chainName-string"
}
],
"inputAnchors": [
{
"label": "Language Model",
"name": "model",
"type": "BaseLanguageModel",
"id": "llmChain_0-input-model-BaseLanguageModel"
},
{
"label": "Prompt",
"name": "prompt",
"type": "BasePromptTemplate",
"id": "llmChain_0-input-prompt-BasePromptTemplate"
},
{
"label": "Output Parser",
"name": "outputParser",
"type": "BaseLLMOutputParser",
"optional": true,
"id": "llmChain_0-input-outputParser-BaseLLMOutputParser"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "llmChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{chatOpenAI_0.data.instance}}",
"prompt": "{{promptTemplate_0.data.instance}}",
"outputParser": "",
"inputModeration": ["{{inputModerationOpenAI_0.data.instance}}"],
"chainName": ""
},
"outputAnchors": [
{
"name": "output",
"label": "Output",
"type": "options",
"options": [
{
"id": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable",
"name": "llmChain",
"label": "LLM Chain",
"type": "LLMChain | BaseChain | Runnable"
},
{
"id": "llmChain_0-output-outputPrediction-string|json",
"name": "outputPrediction",
"label": "Output Prediction",
"type": "string | json"
}
],
"default": "llmChain"
}
],
"outputs": {
"output": "llmChain"
},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 859.216454729136,
"y": 154.86846618352752
},
"dragging": false
},
{
"width": 300,
"height": 574,
"id": "chatOpenAI_0",
"position": {
"x": 424.69244822381864,
"y": -271.138349609141
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
"category": "Chat Models",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "chatOpenAI_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
},
{
"label": "gpt-4-32k",
"name": "gpt-4-32k"
},
{
"label": "gpt-4-32k-0613",
"name": "gpt-4-32k-0613"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
},
{
"label": "gpt-3.5-turbo-16k",
"name": "gpt-3.5-turbo-16k"
},
{
"label": "gpt-3.5-turbo-16k-0613",
"name": "gpt-3.5-turbo-16k-0613"
}
],
"default": "gpt-3.5-turbo",
"optional": true,
"id": "chatOpenAI_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOpenAI_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-topP-number"
},
{
"label": "Frequency Penalty",
"name": "frequencyPenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-frequencyPenalty-number"
},
{
"label": "Presence Penalty",
"name": "presencePenalty",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-presencePenalty-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-basepath-string"
},
{
"label": "BaseOptions",
"name": "baseOptions",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
}
],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOpenAI_0-input-cache-BaseCache"
}
],
"inputs": {
"cache": "",
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"frequencyPenalty": "",
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
},
"outputAnchors": [
{
"id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatOpenAI",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 424.69244822381864,
"y": -271.138349609141
},
"dragging": false
},
{
"width": 300,
"height": 475,
"id": "promptTemplate_0",
"position": {
"x": -17.005933033720936,
"y": -20.829788775850602
},
"type": "customNode",
"data": {
"id": "promptTemplate_0",
"label": "Prompt Template",
"version": 1,
"name": "promptTemplate",
"type": "PromptTemplate",
"baseClasses": ["PromptTemplate", "BaseStringPromptTemplate", "BasePromptTemplate", "Runnable"],
"category": "Prompts",
"description": "Schema to represent a basic prompt for an LLM",
"inputParams": [
{
"label": "Template",
"name": "template",
"type": "string",
"rows": 4,
"placeholder": "What is a good name for a company that makes {product}?",
"id": "promptTemplate_0-input-template-string"
},
{
"label": "Format Prompt Values",
"name": "promptValues",
"type": "json",
"optional": true,
"acceptVariable": true,
"list": true,
"id": "promptTemplate_0-input-promptValues-json"
}
],
"inputAnchors": [],
"inputs": {
"template": "Answer user question:\n{text}",
"promptValues": "{\"history\":\"{{chat_history}}\"}"
},
"outputAnchors": [
{
"id": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable",
"name": "promptTemplate",
"label": "PromptTemplate",
"type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": -17.005933033720936,
"y": -20.829788775850602
},
"dragging": false
}
],
"edges": [
{
"source": "inputModerationOpenAI_0",
"sourceHandle": "inputModerationOpenAI_0-output-inputModerationOpenAI-Moderation|ResponsibleAI",
"target": "llmChain_0",
"targetHandle": "llmChain_0-input-inputModeration-Moderation",
"type": "buttonedge",
"id": "inputModerationOpenAI_0-inputModerationOpenAI_0-output-inputModerationOpenAI-Moderation|ResponsibleAI-llmChain_0-llmChain_0-input-inputModeration-Moderation",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"target": "llmChain_0",
"targetHandle": "llmChain_0-input-model-BaseLanguageModel",
"type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel",
"data": {
"label": ""
}
},
{
"source": "promptTemplate_0",
"sourceHandle": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable",
"target": "llmChain_0",
"targetHandle": "llmChain_0-input-prompt-BasePromptTemplate",
"type": "buttonedge",
"id": "promptTemplate_0-promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable-llmChain_0-llmChain_0-input-prompt-BasePromptTemplate",
"data": {
"label": ""
}
}
]
}