added input moderation to assistant; prevent app from crashing by using try/catch
commit 85e6fad0aa
parent 6921967e9d
@@ -9,6 +9,8 @@ import fetch from 'node-fetch'
 import { flatten, uniqWith, isEqual } from 'lodash'
 import { zodToJsonSchema } from 'zod-to-json-schema'
 import { AnalyticHandler } from '../../../src/handler'
+import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
+import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 
 class OpenAIAssistant_Agents implements INode {
     label: string
@@ -24,7 +26,7 @@ class OpenAIAssistant_Agents implements INode {
     constructor() {
         this.label = 'OpenAI Assistant'
         this.name = 'openAIAssistant'
-        this.version = 2.0
+        this.version = 3.0
         this.type = 'OpenAIAssistant'
         this.category = 'Agents'
         this.icon = 'assistant.svg'
@@ -43,6 +45,14 @@
                 type: 'Tool',
                 list: true
             },
+            {
+                label: 'Input Moderation',
+                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
+                name: 'inputModeration',
+                type: 'Moderation',
+                optional: true,
+                list: true
+            },
             {
                 label: 'Disable File Download',
                 name: 'disableFileDownload',
@@ -133,6 +143,20 @@ class OpenAIAssistant_Agents implements INode {
         const appDataSource = options.appDataSource as DataSource
         const databaseEntities = options.databaseEntities as IDatabaseEntity
         const disableFileDownload = nodeData.inputs?.disableFileDownload as boolean
+        const moderations = nodeData.inputs?.inputModeration as Moderation[]
+        const isStreaming = options.socketIO && options.socketIOClientId
+        const socketIO = isStreaming ? options.socketIO : undefined
+        const socketIOClientId = isStreaming ? options.socketIOClientId : ''
+
+        if (moderations && moderations.length > 0) {
+            try {
+                input = await checkInputs(moderations, input)
+            } catch (e) {
+                await new Promise((resolve) => setTimeout(resolve, 500))
+                streamResponse(isStreaming, e.message, socketIO, socketIOClientId)
+                return formatResponse(e.message)
+            }
+        }
 
         let tools = nodeData.inputs?.tools
         tools = flatten(tools)
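A minimal standalone sketch of the moderation guard added above. The helper name `moderateOrRespond` is hypothetical; the imports are the same modules referenced in the hunk, and `checkInputs` is assumed (as in the diff) to resolve with the possibly rewritten input and to throw when a configured moderation flags it:

import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

// Run every configured moderation over the user input; on rejection, stream the
// error text to the client and hand back a formatted response instead of throwing,
// so the calling flow returns gracefully rather than crashing.
async function moderateOrRespond(
    moderations: Moderation[] | undefined,
    input: string,
    isStreaming: boolean,
    socketIO?: any,
    socketIOClientId = ''
): Promise<{ input: string; earlyResponse?: unknown }> {
    if (!moderations || !moderations.length) return { input }
    try {
        return { input: await checkInputs(moderations, input) }
    } catch (e: any) {
        // short delay so the client socket is listening before the error is streamed
        await new Promise((resolve) => setTimeout(resolve, 500))
        streamResponse(isStreaming, e.message, socketIO, socketIOClientId)
        return { input, earlyResponse: formatResponse(e.message) }
    }
}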
@@ -249,7 +273,12 @@
                         const actions: ICommonObject[] = []
                         run.required_action.submit_tool_outputs.tool_calls.forEach((item) => {
                             const functionCall = item.function
-                            const args = JSON.parse(functionCall.arguments)
+                            let args = {}
+                            try {
+                                args = JSON.parse(functionCall.arguments)
+                            } catch (e) {
+                                console.error('Error parsing arguments, default to empty object')
+                            }
                             actions.push({
                                 tool: functionCall.name,
                                 toolInput: args,
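The argument-parsing fix above, expressed as a small hypothetical helper (`safeParseArgs` is not part of the diff). Tool-call arguments arrive as a JSON string produced by the model, so parsing must not be allowed to throw:

// Parse the model-supplied arguments defensively, falling back to an empty
// object so one malformed tool call cannot crash the whole run.
function safeParseArgs(rawArguments: string): Record<string, unknown> {
    try {
        return JSON.parse(rawArguments)
    } catch (e) {
        console.error('Error parsing arguments, default to empty object')
        return {}
    }
}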
@@ -264,21 +293,32 @@
 
                                 // Start tool analytics
                                 const toolIds = await analyticHandlers.onToolStart(tool.name, actions[i].toolInput, parentIds)
                                 if (options.socketIO && options.socketIOClientId)
                                     options.socketIO.to(options.socketIOClientId).emit('tool', tool.name)
 
-                                const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, threadId)
-                                // End tool analytics
-                                await analyticHandlers.onToolEnd(toolIds, toolOutput)
-                                submitToolOutputs.push({
-                                    tool_call_id: actions[i].toolCallId,
-                                    output: toolOutput
-                                })
-                                usedTools.push({
-                                    tool: tool.name,
-                                    toolInput: actions[i].toolInput,
-                                    toolOutput
-                                })
+                                try {
+                                    const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, threadId)
+                                    await analyticHandlers.onToolEnd(toolIds, toolOutput)
+                                    submitToolOutputs.push({
+                                        tool_call_id: actions[i].toolCallId,
+                                        output: toolOutput
+                                    })
+                                    usedTools.push({
+                                        tool: tool.name,
+                                        toolInput: actions[i].toolInput,
+                                        toolOutput
+                                    })
+                                } catch (e) {
+                                    await analyticHandlers.onToolEnd(toolIds, e)
+                                    console.error('Error executing tool', e)
+                                    clearInterval(timeout)
+                                    reject(
+                                        new Error(
+                                            `Error processing thread: ${state}, Thread ID: ${threadId}, Run ID: ${runId}, Tool: ${tool.name}`
+                                        )
+                                    )
+                                    break
+                                }
                             }
 
                             if (submitToolOutputs.length) {
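The catch branch above has to do three things because the tool call runs inside an interval-driven Promise: close the analytics span with the error, clear the polling interval, and reject the Promise (then break out of the remaining tool calls). A generic, illustrative sketch of that pattern, independent of the Flowise internals beyond the shape:

// If work inside a setInterval-driven Promise fails, the interval must be
// cleared *and* the Promise rejected, otherwise the caller waits forever.
function pollUntilDone(step: () => Promise<boolean>): Promise<void> {
    return new Promise<void>((resolve, reject) => {
        const timeout = setInterval(async () => {
            try {
                if (await step()) {
                    clearInterval(timeout)
                    resolve()
                }
            } catch (e) {
                clearInterval(timeout)
                reject(e instanceof Error ? e : new Error(String(e)))
            }
        }, 500)
    })
}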
@@ -14,7 +14,7 @@
         "data": {
             "id": "openAIAssistant_0",
             "label": "OpenAI Assistant",
-            "version": 2,
+            "version": 3,
             "name": "openAIAssistant",
             "type": "OpenAIAssistant",
             "baseClasses": ["OpenAIAssistant"],
@@ -45,6 +45,15 @@
                     "type": "Tool",
                     "list": true,
                     "id": "openAIAssistant_0-input-tools-Tool"
-                }
+                },
+                {
+                    "label": "Input Moderation",
+                    "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
+                    "name": "inputModeration",
+                    "type": "Moderation",
+                    "optional": true,
+                    "list": true,
+                    "id": "openAIAssistant_0-input-inputModeration-Moderation"
+                }
             ],
             "inputs": {