Chore/API for AgentflowV2 (#4696)
* Enhancement: Introduce prepended chat history handling in Agent and LLM nodes.
  - Added support for `prependedChatHistory` in both `Agent` and `LLM` classes to allow for initial message context.
  - Implemented validation for the history schema in the execution flow to ensure proper format.
  - Refactored utility functions to include JSON sanitization and validation methods for improved data handling.
* Update prediction swagger.
parent 035b5555a9
commit 543800562e
@@ -1216,15 +1216,18 @@ paths:
security:
- bearerAuth: []
operationId: createPrediction
summary: Create a new prediction
description: Create a new prediction
summary: Send message to flow and get AI response
description: |
Send a message to your flow and receive an AI-generated response. This is the primary endpoint for interacting with your flows and assistants.
**Authentication**: API key may be required depending on flow settings.
parameters:
- in: path
name: id
required: true
schema:
type: string
description: Chatflow ID
description: Flow ID - the unique identifier of your flow
example: 'your-flow-id'
requestBody:
content:
application/json:
@@ -1236,24 +1239,36 @@ paths:
properties:
question:
type: string
description: Question to ask during the prediction process
description: Question/message to send to the flow
example: 'Analyze this uploaded file and summarize its contents'
files:
type: array
items:
type: string
format: binary
description: Files to be uploaded
modelName:
description: Files to be uploaded (images, audio, documents, etc.)
streaming:
type: boolean
description: Enable streaming responses
default: false
overrideConfig:
type: string
nullable: true
example: ''
description: Other override configurations
description: JSON string of configuration overrides
example: '{"sessionId":"user-123","temperature":0.7}'
history:
type: string
description: JSON string of conversation history
example: '[{"role":"userMessage","content":"Hello"},{"role":"apiMessage","content":"Hi there!"}]'
humanInput:
type: string
description: JSON string of human input for resuming execution
example: '{"type":"proceed","feedback":"Continue with the plan"}'
required:
- question
required: true
responses:
'200':
description: Prediction created successfully
description: Successful prediction response
content:
application/json:
schema:
@@ -1261,45 +1276,106 @@ paths:
properties:
text:
type: string
description: The result of the prediction
description: The AI-generated response text
example: 'Artificial intelligence (AI) is a branch of computer science that focuses on creating systems capable of performing tasks that typically require human intelligence.'
json:
type: object
description: The result of the prediction in JSON format if available
description: The result in JSON format if available (for structured outputs)
nullable: true
question:
type: string
description: The question asked during the prediction process
description: The original question/message sent to the flow
example: 'What is artificial intelligence?'
chatId:
type: string
description: The chat ID associated with the prediction
description: Unique identifier for the chat session
example: 'chat-12345'
chatMessageId:
type: string
description: The chat message ID associated with the prediction
description: Unique identifier for this specific message
example: 'msg-67890'
sessionId:
type: string
description: The session ID associated with the prediction
description: Session identifier for conversation continuity
example: 'user-session-123'
nullable: true
memoryType:
type: string
description: The memory type associated with the prediction
description: Type of memory used for conversation context
example: 'Buffer Memory'
nullable: true
sourceDocuments:
type: array
description: Documents retrieved from vector store (if RAG is enabled)
items:
$ref: '#/components/schemas/Document'
nullable: true
usedTools:
type: array
description: Tools that were invoked during the response generation
items:
$ref: '#/components/schemas/UsedTool'
fileAnnotations:
type: array
items:
$ref: '#/components/schemas/FileAnnotation'
nullable: true
'400':
description: Invalid input provided
description: Bad Request - Invalid input provided or request format is incorrect
content:
application/json:
schema:
type: object
properties:
error:
type: string
example: 'Invalid request format. Check required fields and parameter types.'
'401':
description: Unauthorized - API key required or invalid
content:
application/json:
schema:
type: object
properties:
error:
type: string
example: 'Unauthorized access. Please verify your API key.'
'404':
description: Chatflow not found
description: Not Found - Chatflow with specified ID does not exist
content:
application/json:
schema:
type: object
properties:
error:
type: string
example: 'Chatflow not found. Please verify the chatflow ID.'
'413':
description: Payload Too Large - Request payload exceeds size limits
content:
application/json:
schema:
type: object
properties:
error:
type: string
example: 'Request payload too large. Please reduce file sizes or split large requests.'
'422':
description: Validation error
description: Validation Error - Request validation failed
content:
application/json:
schema:
type: object
properties:
error:
type: string
example: 'Validation failed. Check parameter requirements and data types.'
'500':
description: Internal server error
description: Internal Server Error - Flow configuration or execution error
content:
application/json:
schema:
type: object
properties:
error:
type: string
example: 'Internal server error. Check flow configuration and node settings.'
/tools:
post:
tags:
@@ -2011,13 +2087,33 @@ components:
properties:
question:
type: string
description: The question being asked
description: The question/message to send to the flow
example: 'What is artificial intelligence?'
form:
type: object
description: The form object to send to the flow (alternative to question for Agentflow V2)
additionalProperties: true
example:
title: 'Example'
count: 1
streaming:
type: boolean
description: Enable streaming responses for real-time output
default: false
example: false
overrideConfig:
type: object
description: The configuration to override the default prediction settings (optional)
description: Override flow configuration and pass variables at runtime
additionalProperties: true
example:
sessionId: 'user-session-123'
temperature: 0.7
maxTokens: 500
vars:
user_name: 'Alice'
history:
type: array
description: The history messages to be prepended (optional)
description: Previous conversation messages for context
items:
type: object
properties:
@@ -2030,8 +2126,14 @@ components:
type: string
description: The content of the message
example: 'Hello, how can I help you?'
example:
- role: 'apiMessage'
content: "Hello! I'm an AI assistant. How can I help you today?"
- role: 'userMessage'
content: "Hi, my name is Sarah and I'm learning about AI"
uploads:
type: array
description: Files to upload (images, audio, documents, etc.)
items:
type: object
properties:
@@ -2051,7 +2153,42 @@ components:
mime:
type: string
description: The MIME type of the file or resource
enum:
[
'image/png',
'image/jpeg',
'image/jpg',
'image/gif',
'image/webp',
'audio/mp4',
'audio/webm',
'audio/wav',
'audio/mpeg',
'audio/ogg',
'audio/aac'
]
example: 'image/png'
example:
- type: 'file'
name: 'example.png'
data: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG'
mime: 'image/png'
humanInput:
type: object
description: Return human feedback and resume execution from a stopped checkpoint
properties:
type:
type: string
enum: [proceed, reject]
description: Type of human input response
example: 'reject'
feedback:
type: string
description: Feedback to the last output
example: 'Include more emoji'
example:
type: 'reject'
feedback: 'Include more emoji'

Tool:
type: object
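For orientation, a minimal client-side sketch that exercises the updated PredictionRequest schema above. The base URL, flow ID and API key are placeholders, and the /api/v1/prediction/{id} route is assumed from the createPrediction operation; field values mirror the examples documented in the schema.

// Sketch only - field names follow the PredictionRequest schema above
const response = await fetch('http://localhost:3000/api/v1/prediction/your-flow-id', {
    method: 'POST',
    headers: {
        'Content-Type': 'application/json',
        Authorization: 'Bearer YOUR_API_KEY' // bearerAuth, if the flow requires it
    },
    body: JSON.stringify({
        question: 'What is artificial intelligence?',
        streaming: false,
        overrideConfig: { sessionId: 'user-session-123' },
        history: [
            { role: 'apiMessage', content: "Hello! I'm an AI assistant. How can I help you today?" },
            { role: 'userMessage', content: "Hi, my name is Sarah and I'm learning about AI" }
        ]
    })
})
const prediction = await response.json() // { text, chatId, chatMessageId, sessionId, ... }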
@@ -3,6 +3,7 @@ import {
ICommonObject,
IDatabaseEntity,
IHumanInput,
IMessage,
INode,
INodeData,
INodeOptionsValue,
@@ -696,6 +697,7 @@ class Agent_Agentflow implements INode {
const state = options.agentflowRuntime?.state as ICommonObject
const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? []
const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? []
const prependedChatHistory = options.prependedChatHistory as IMessage[]
const chatId = options.chatId as string

// Initialize the LLM model instance
@@ -730,6 +732,18 @@
// Use to keep track of past messages with image file references
let pastImageMessagesWithFileRef: BaseMessageLike[] = []

// Prepend history ONLY if it is the first node
if (prependedChatHistory.length > 0 && !runtimeChatHistory.length) {
for (const msg of prependedChatHistory) {
const role: string = msg.role === 'apiMessage' ? 'assistant' : 'user'
const content: string = msg.content ?? ''
messages.push({
role,
content
})
}
}

for (const msg of agentMessages) {
const role = msg.role
const content = msg.content
@@ -1,5 +1,5 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
import { ICommonObject, IMessage, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
import { AIMessageChunk, BaseMessageLike, MessageContentText } from '@langchain/core/messages'
import { DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt'
import { z } from 'zod'
@@ -359,6 +359,7 @@ class LLM_Agentflow implements INode {
const state = options.agentflowRuntime?.state as ICommonObject
const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? []
const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? []
const prependedChatHistory = options.prependedChatHistory as IMessage[]
const chatId = options.chatId as string

// Initialize the LLM model instance
@@ -382,6 +383,18 @@
// Use to keep track of past messages with image file references
let pastImageMessagesWithFileRef: BaseMessageLike[] = []

// Prepend history ONLY if it is the first node
if (prependedChatHistory.length > 0 && !runtimeChatHistory.length) {
for (const msg of prependedChatHistory) {
const role: string = msg.role === 'apiMessage' ? 'assistant' : 'user'
const content: string = msg.content ?? ''
messages.push({
role,
content
})
}
}

for (const msg of llmMessages) {
const role = msg.role
const content = msg.content
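The same prepend loop appears in both the Agent and LLM nodes; as a rough illustration of the mapping it performs on each incoming IMessage (values illustrative, not part of the diff):

// { role: 'apiMessage', content: 'Hi there!' }  ->  pushed as { role: 'assistant', content: 'Hi there!' }
// { role: 'userMessage', content: 'Hello' }     ->  pushed as { role: 'user', content: 'Hello' }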
@@ -322,7 +322,7 @@ export interface IOverrideConfig {
label: string
name: string
type: string
schema?: ICommonObject[]
schema?: ICommonObject[] | Record<string, string>
}

export type ICredentialDataDecrypted = ICommonObject
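For context, both of the following now satisfy the widened schema field; the object form is what the new loadConfig handling further down produces (values are illustrative):

const arraySchema: ICommonObject[] = [{ name: 'modelName', type: 'string' }]
const objectSchema: Record<string, string> = { modelName: 'string', temperature: 'number' }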
@@ -5,7 +5,7 @@ import { Execution } from '../../database/entities/Execution'
import { InternalFlowiseError } from '../../errors/internalFlowiseError'
import { getErrorMessage } from '../../errors/utils'
import { ExecutionState, IAgentflowExecutedData } from '../../Interface'
import { _removeCredentialId } from '../../utils/buildAgentflow'
import { _removeCredentialId } from '../../utils'
import { getRunningExpressApp } from '../../utils/getRunningExpressApp'

export interface ExecutionFilters {
@@ -41,7 +41,9 @@ import {
getStartingNode,
getTelemetryFlowObj,
QUESTION_VAR_PREFIX,
CURRENT_DATE_TIME_VAR_PREFIX
CURRENT_DATE_TIME_VAR_PREFIX,
_removeCredentialId,
validateHistorySchema
} from '.'
import { ChatFlow } from '../database/entities/ChatFlow'
import { Variable } from '../database/entities/Variable'
@@ -105,6 +107,7 @@ interface IExecuteNodeParams {
evaluationRunId?: string
isInternal: boolean
pastChatHistory: IMessage[]
prependedChatHistory: IMessage[]
appDataSource: DataSource
usageCacheManager: UsageCacheManager
telemetry: Telemetry
@@ -203,21 +206,6 @@ const updateExecution = async (appDataSource: DataSource, executionId: string, w
await appDataSource.getRepository(Execution).save(execution)
}

export const _removeCredentialId = (obj: any): any => {
if (!obj || typeof obj !== 'object') return obj

if (Array.isArray(obj)) {
return obj.map((item) => _removeCredentialId(item))
}

const newObj: Record<string, any> = {}
for (const [key, value] of Object.entries(obj)) {
if (key === 'FLOWISE_CREDENTIAL_ID') continue
newObj[key] = _removeCredentialId(value)
}
return newObj
}

export const resolveVariables = async (
reactFlowNodeData: INodeData,
question: string,
@@ -820,6 +808,7 @@ const executeNode = async ({
evaluationRunId,
parentExecutionId,
pastChatHistory,
prependedChatHistory,
appDataSource,
usageCacheManager,
telemetry,
@@ -927,6 +916,7 @@
humanInputAction = lastNodeOutput?.humanInputAction
}

// This is when human in the loop is resumed
if (humanInput && nodeId === humanInput.startNodeId) {
reactFlowNodeData.inputs = { ...reactFlowNodeData.inputs, humanInput }
// Remove the stopped humanInput from execution data
@@ -973,6 +963,7 @@
isLastNode,
sseStreamer,
pastChatHistory,
prependedChatHistory,
agentflowRuntime,
abortController,
analyticHandlers,
@@ -1297,6 +1288,17 @@ export const executeAgentFlow = async ({
const chatflowid = chatflow.id
const sessionId = incomingInput.sessionId ?? chatId
const humanInput: IHumanInput | undefined = incomingInput.humanInput

// Validate history schema if provided
if (incomingInput.history && incomingInput.history.length > 0) {
if (!validateHistorySchema(incomingInput.history)) {
throw new Error(
'Invalid history format. Each history item must have: ' + '{ role: "apiMessage" | "userMessage", content: string }'
)
}
}

const prependedChatHistory = incomingInput.history ?? []
const apiMessageId = uuidv4()

/*** Get chatflows and prepare data ***/
@@ -1413,35 +1415,90 @@
}

// If it is human input, find the last checkpoint and resume
if (humanInput?.startNodeId) {
if (humanInput) {
if (!previousExecution) {
throw new Error(`No previous execution found for session ${sessionId}`)
}

if (previousExecution.state !== 'STOPPED') {
let executionData = JSON.parse(previousExecution.executionData) as IAgentflowExecutedData[]
let shouldUpdateExecution = false

// Handle different execution states
if (previousExecution.state === 'STOPPED') {
// Normal case - execution is stopped and ready to resume
logger.debug(` ✅ Previous execution is in STOPPED state, ready to resume`)
} else if (previousExecution.state === 'ERROR') {
// Check if second-to-last execution item is STOPPED and last is ERROR
if (executionData.length >= 2) {
const lastItem = executionData[executionData.length - 1]
const secondLastItem = executionData[executionData.length - 2]

if (lastItem.status === 'ERROR' && secondLastItem.status === 'STOPPED') {
logger.debug(` 🔄 Found ERROR after STOPPED - removing last error item to allow retry`)
logger.debug(` Removing: ${lastItem.nodeId} (${lastItem.nodeLabel}) - ${lastItem.data?.error || 'Unknown error'}`)

// Remove the last ERROR item
executionData = executionData.slice(0, -1)
shouldUpdateExecution = true
} else {
throw new Error(
`Cannot resume execution ${previousExecution.id} because it is in 'ERROR' state ` +
`and the previous item is not in 'STOPPED' state. Only executions that ended with a ` +
`STOPPED state (or ERROR after STOPPED) can be resumed.`
)
}
} else {
throw new Error(
`Cannot resume execution ${previousExecution.id} because it is in 'ERROR' state ` +
`with insufficient execution data. Only executions in 'STOPPED' state can be resumed.`
)
}
} else {
throw new Error(
`Cannot resume execution ${previousExecution.id} because it is in '${previousExecution.state}' state. ` +
`Only executions in 'STOPPED' state can be resumed.`
`Only executions in 'STOPPED' state (or 'ERROR' after 'STOPPED') can be resumed.`
)
}

startingNodeIds.push(humanInput.startNodeId)
checkForMultipleStartNodes(startingNodeIds, isRecursive, nodes)
let startNodeId = humanInput.startNodeId

const executionData = JSON.parse(previousExecution.executionData) as IAgentflowExecutedData[]
// If startNodeId is not provided, find the last node with STOPPED status from execution data
if (!startNodeId) {
// Search in reverse order to find the last (most recent) STOPPED node
const stoppedNode = [...executionData].reverse().find((data) => data.status === 'STOPPED')

// Verify that the humanInputAgentflow node exists in previous execution
const humanInputNodeExists = executionData.some((data) => data.nodeId === humanInput.startNodeId)
if (!stoppedNode) {
throw new Error('No stopped node found in previous execution data to resume from')
}

if (!humanInputNodeExists) {
startNodeId = stoppedNode.nodeId
logger.debug(` 🔍 Auto-detected stopped node to resume from: ${startNodeId} (${stoppedNode.nodeLabel})`)
}

// Verify that the node exists in previous execution
const nodeExists = executionData.some((data) => data.nodeId === startNodeId)

if (!nodeExists) {
throw new Error(
`Human Input node ${humanInput.startNodeId} not found in previous execution. ` +
`Node ${startNodeId} not found in previous execution. ` +
`This could indicate an invalid resume attempt or a modified flow.`
)
}

startingNodeIds.push(startNodeId)
checkForMultipleStartNodes(startingNodeIds, isRecursive, nodes)

agentFlowExecutedData.push(...executionData)

// Update execution data if we removed an error item
if (shouldUpdateExecution) {
logger.debug(` 📝 Updating execution data after removing error item`)
await updateExecution(appDataSource, previousExecution.id, workspaceId, {
executionData: JSON.stringify(executionData),
state: 'INPROGRESS'
})
}

// Get last state
const lastState = executionData[executionData.length - 1].data.state
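As a rough sketch of what this resume path consumes, shaped after the IHumanInput usage above and the humanInput schema in the swagger; startNodeId is optional and, when omitted, the last STOPPED node is auto-detected:

const humanInput = {
    type: 'proceed',                    // 'proceed' | 'reject'
    feedback: 'Continue with the plan', // optional feedback on the last output
    startNodeId: undefined              // omit to resume from the auto-detected stopped node
}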
@@ -1454,6 +1511,9 @@
})
newExecution = previousExecution
parentExecutionId = previousExecution.id

// Update humanInput with the resolved startNodeId
humanInput.startNodeId = startNodeId
} else if (isRecursive && parentExecutionId) {
const { startingNodeIds: startingNodeIdsFromFlow } = getStartingNode(nodeDependencies)
startingNodeIds.push(...startingNodeIdsFromFlow)
@@ -1604,6 +1664,7 @@
parentExecutionId,
isInternal,
pastChatHistory,
prependedChatHistory,
appDataSource,
usageCacheManager,
telemetry,
@@ -1103,12 +1103,13 @@ export const replaceInputsWithConfig = (
* Several conditions:
* 1. If config is 'analytics', always allow it
* 2. If config is 'vars', check its object and filter out the variables that are not enabled for override
* 3. If typeof config's value is an object, check if the node id is in the overrideConfig object and if the parameter (systemMessagePrompt) is enabled
* 3. If typeof config's value is an array, check if the parameter is enabled and apply directly
* 4. If typeof config's value is an object, check if the node id is in the overrideConfig object and if the parameter (systemMessagePrompt) is enabled
* Example:
* "systemMessagePrompt": {
*     "chatPromptTemplate_0": "You are an assistant"
* }
* 4. If typeof config's value is a string, check if the parameter is enabled
* 5. If typeof config's value is a string, check if the parameter is enabled
* Example:
* "systemMessagePrompt": "You are an assistant"
*/
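To make the numbered cases concrete, a hypothetical overrideConfig illustrating each value shape (the keys other than analytics and vars are placeholders, not taken from the diff):

const overrideConfig = {
    analytics: {},                                                        // 1. always allowed
    vars: { user_name: 'Alice' },                                         // 2. filtered against variables enabled for override
    startState: [{ key: 'topic', value: 'AI' }],                          // 3. array value, applied directly when the parameter is enabled
    systemMessagePrompt: { chatPromptTemplate_0: 'You are an assistant' } // 4. object keyed by node id
    // 5. or a plain string, e.g. systemMessagePrompt: 'You are an assistant'
}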
@@ -1129,6 +1130,12 @@
}
overrideConfig[config] = filteredVars
}
} else if (Array.isArray(overrideConfig[config])) {
// Handle arrays as direct parameter values
if (isParameterEnabled(flowNodeData.label, config)) {
inputsObj[config] = overrideConfig[config]
}
continue
} else if (overrideConfig[config] && typeof overrideConfig[config] === 'object') {
const nodeIds = Object.keys(overrideConfig[config])
if (nodeIds.includes(flowNodeData.id)) {
@@ -1352,6 +1359,48 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
schema: arraySchema
}
}
} else if (inputParam.loadConfig) {
const configData = flowNode?.data?.inputs?.[`${inputParam.name}Config`]
if (configData) {
// Parse config data to extract schema
let parsedConfig: any = {}
try {
parsedConfig = typeof configData === 'string' ? JSON.parse(configData) : configData
} catch (e) {
// If parsing fails, treat as empty object
parsedConfig = {}
}

// Generate schema from config structure
const configSchema: Record<string, string> = {}
parsedConfig = _removeCredentialId(parsedConfig)
for (const key in parsedConfig) {
if (key === inputParam.name) continue
const value = parsedConfig[key]
let fieldType = 'string' // default type

if (typeof value === 'boolean') {
fieldType = 'boolean'
} else if (typeof value === 'number') {
fieldType = 'number'
} else if (Array.isArray(value)) {
fieldType = 'array'
} else if (typeof value === 'object' && value !== null) {
fieldType = 'object'
}

configSchema[key] = fieldType
}

obj = {
node: flowNode.data.label,
nodeId: flowNode.data.id,
label: `${inputParam.label} Config`,
name: `${inputParam.name}Config`,
type: `json`,
schema: configSchema
}
}
} else {
obj = {
node: flowNode.data.label,
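For illustration, given a hypothetical llmModelConfig input value, the typeof checks above would derive a schema like this (the parameter name and label are assumptions):

const parsedConfig = { modelName: 'gpt-4o', temperature: 0.7, streaming: true, stopSequences: [] }
// Resulting override config entry exposed by findAvailableConfigs:
// { label: 'LLM Model Config', name: 'llmModelConfig', type: 'json',
//   schema: { modelName: 'string', temperature: 'number', streaming: 'boolean', stopSequences: 'array' } }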
@@ -1930,3 +1979,48 @@ export const getAllNodesInPath = (startNode: string, graph: INodeDirectedGraph):

return Array.from(nodes)
}

export const _removeCredentialId = (obj: any): any => {
if (!obj || typeof obj !== 'object') return obj

if (Array.isArray(obj)) {
return obj.map((item) => _removeCredentialId(item))
}

const newObj: Record<string, any> = {}
for (const [key, value] of Object.entries(obj)) {
if (key === 'FLOWISE_CREDENTIAL_ID') continue
newObj[key] = _removeCredentialId(value)
}
return newObj
}

/**
* Validates that history items follow the expected schema
* @param {any[]} history - Array of history items to validate
* @returns {boolean} - True if all items are valid, false otherwise
*/
export const validateHistorySchema = (history: any[]): boolean => {
if (!Array.isArray(history)) {
return false
}

return history.every((item) => {
// Check if item is an object
if (typeof item !== 'object' || item === null) {
return false
}

// Check if role exists and is valid
if (typeof item.role !== 'string' || !['apiMessage', 'userMessage'].includes(item.role)) {
return false
}

// Check if content exists and is a string
if (typeof item.content !== 'string') {
return false
}

return true
})
}
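A quick usage sketch of the validator above:

validateHistorySchema([
    { role: 'userMessage', content: 'Hello' },
    { role: 'apiMessage', content: 'Hi there!' }
]) // true

validateHistorySchema([{ role: 'system', content: 'Be helpful' }]) // false - role must be 'apiMessage' or 'userMessage'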
@@ -48,21 +48,30 @@ const OverrideConfigTable = ({ columns, onToggle, rows, sx }) => {
return <SwitchInput onChange={(enabled) => handleChange(enabled, row)} value={row.enabled} />
} else if (key === 'type' && row.schema) {
// If there's schema information, add a tooltip
const schemaContent =
'[<br>' +
row.schema
.map(
(item) =>
` ${JSON.stringify(
{
[item.name]: item.type
},
null,
2
)}`
)
.join(',<br>') +
'<br>]'
let schemaContent
if (Array.isArray(row.schema)) {
// Handle array format: [{ name: "field", type: "string" }, ...]
schemaContent =
'[<br>' +
row.schema
.map(
(item) =>
` ${JSON.stringify(
{
[item.name]: item.type
},
null,
2
)}`
)
.join(',<br>') +
'<br>]'
} else if (typeof row.schema === 'object' && row.schema !== null) {
// Handle object format: { "field": "string", "field2": "number", ... }
schemaContent = JSON.stringify(row.schema, null, 2).replace(/\n/g, '<br>').replace(/ /g, '&nbsp;')
} else {
schemaContent = 'No schema available'
}

return (
<Stack direction='row' alignItems='center' spacing={1}>
@@ -11,21 +11,30 @@ export const TableViewOnly = ({ columns, rows, sx }) => {
return row[key] ? <Chip label='Enabled' color='primary' /> : <Chip label='Disabled' />
} else if (key === 'type' && row.schema) {
// If there's schema information, add a tooltip
const schemaContent =
'[<br>' +
row.schema
.map(
(item) =>
` ${JSON.stringify(
{
[item.name]: item.type
},
null,
2
)}`
)
.join(',<br>') +
'<br>]'
let schemaContent
if (Array.isArray(row.schema)) {
// Handle array format: [{ name: "field", type: "string" }, ...]
schemaContent =
'[<br>' +
row.schema
.map(
(item) =>
` ${JSON.stringify(
{
[item.name]: item.type
},
null,
2
)}`
)
.join(',<br>') +
'<br>]'
} else if (typeof row.schema === 'object' && row.schema !== null) {
// Handle object format: { "field": "string", "field2": "number", ... }
schemaContent = JSON.stringify(row.schema, null, 2).replace(/\n/g, '<br>').replace(/ /g, '&nbsp;')
} else {
schemaContent = 'No schema available'
}

return (
<Stack direction='row' alignItems='center' spacing={1}>