Feature/OpenAI Response API (#5014)

* - Added support for built-in OpenAI tools including web search, code interpreter, and image generation.
- Enhanced file handling by extracting artifacts and file annotations from response metadata.
- Implemented download functionality for file annotations in the UI.
- Updated chat history management to include additional kwargs for artifacts, file annotations, and used tools.
- Improved UI components to display used tools and file annotations effectively.

* remove redundant currentContainerId

* update comment
This commit is contained in:
Henry Heng 2025-08-07 17:59:05 +01:00 committed by GitHub
parent 3187377c61
commit b608219642
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 860 additions and 14 deletions

View File

@ -28,6 +28,9 @@ import {
replaceBase64ImagesWithFileReferences,
updateFlowState
} from '../utils'
import { convertMultiOptionsToStringArray, getCredentialData, getCredentialParam } from '../../../src/utils'
import { addSingleFileToStorage } from '../../../src/storageUtils'
import fetch from 'node-fetch'
interface ITool {
agentSelectedTool: string
@ -78,7 +81,7 @@ class Agent_Agentflow implements INode {
constructor() {
this.label = 'Agent'
this.name = 'agentAgentflow'
this.version = 1.0
this.version = 2.0
this.type = 'Agent'
this.category = 'Agent Flows'
this.description = 'Dynamically choose and utilize tools during runtime, enabling multi-step reasoning'
@ -132,6 +135,32 @@ class Agent_Agentflow implements INode {
}
]
},
{
label: 'OpenAI Built-in Tools',
name: 'agentToolsBuiltInOpenAI',
type: 'multiOptions',
optional: true,
options: [
{
label: 'Web Search',
name: 'web_search_preview',
description: 'Search the web for the latest information'
},
{
label: 'Code Interpreter',
name: 'code_interpreter',
description: 'Write and run Python code in a sandboxed environment'
},
{
label: 'Image Generation',
name: 'image_generation',
description: 'Generate images based on a text prompt'
}
],
show: {
agentModel: 'chatOpenAI'
}
},
{
label: 'Tools',
name: 'agentTools',
@ -716,6 +745,26 @@ class Agent_Agentflow implements INode {
const llmWithoutToolsBind = (await newLLMNodeInstance.init(newNodeData, '', options)) as BaseChatModel
let llmNodeInstance = llmWithoutToolsBind
const agentToolsBuiltInOpenAI = convertMultiOptionsToStringArray(nodeData.inputs?.agentToolsBuiltInOpenAI)
if (agentToolsBuiltInOpenAI && agentToolsBuiltInOpenAI.length > 0) {
for (const tool of agentToolsBuiltInOpenAI) {
const builtInTool: ICommonObject = {
type: tool
}
if (tool === 'code_interpreter') {
builtInTool.container = { type: 'auto' }
}
;(toolsInstance as any).push(builtInTool)
;(availableTools as any).push({
name: tool,
toolNode: {
label: tool,
name: tool
}
})
}
}
if (llmNodeInstance && toolsInstance.length > 0) {
if (llmNodeInstance.bindTools === undefined) {
throw new Error(`Agent needs to have a function calling capable models.`)
@ -814,6 +863,7 @@ class Agent_Agentflow implements INode {
let usedTools: IUsedTool[] = []
let sourceDocuments: Array<any> = []
let artifacts: any[] = []
let fileAnnotations: any[] = []
let additionalTokens = 0
let isWaitingForHumanInput = false
@ -879,6 +929,9 @@ class Agent_Agentflow implements INode {
}
}
// Address built in tools (after artifacts are processed)
const builtInUsedTools: IUsedTool[] = await this.extractBuiltInUsedTools(response, [])
if (!humanInput && response.tool_calls && response.tool_calls.length > 0) {
const result = await this.handleToolCalls({
response,
@ -954,6 +1007,46 @@ class Agent_Agentflow implements INode {
} else {
finalResponse = JSON.stringify(response, null, 2)
}
// Address built in tools
const additionalBuiltInUsedTools: IUsedTool[] = await this.extractBuiltInUsedTools(response, builtInUsedTools)
if (additionalBuiltInUsedTools.length > 0) {
usedTools = [...new Set([...usedTools, ...additionalBuiltInUsedTools])]
// Stream used tools if this is the last node
if (isLastNode && sseStreamer) {
sseStreamer.streamUsedToolsEvent(chatId, flatten(usedTools))
}
}
// Extract artifacts from annotations in response metadata
if (response.response_metadata) {
const { artifacts: extractedArtifacts, fileAnnotations: extractedFileAnnotations } =
await this.extractArtifactsFromResponse(response.response_metadata, newNodeData, options)
if (extractedArtifacts.length > 0) {
artifacts = [...artifacts, ...extractedArtifacts]
// Stream artifacts if this is the last node
if (isLastNode && sseStreamer) {
sseStreamer.streamArtifactsEvent(chatId, extractedArtifacts)
}
}
if (extractedFileAnnotations.length > 0) {
fileAnnotations = [...fileAnnotations, ...extractedFileAnnotations]
// Stream file annotations if this is the last node
if (isLastNode && sseStreamer) {
sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
}
}
}
// Replace sandbox links with proper download URLs. Example: [Download the script](sandbox:/mnt/data/dummy_bar_graph.py)
if (finalResponse.includes('sandbox:/')) {
finalResponse = await this.processSandboxLinks(finalResponse, options.baseURL, options.chatflowid, chatId)
}
const output = this.prepareOutputObject(
response,
availableTools,
@ -965,7 +1058,8 @@ class Agent_Agentflow implements INode {
sourceDocuments,
artifacts,
additionalTokens,
isWaitingForHumanInput
isWaitingForHumanInput,
fileAnnotations
)
// End analytics tracking
@ -978,6 +1072,11 @@ class Agent_Agentflow implements INode {
this.sendStreamingEvents(options, chatId, response)
}
// Stream file annotations if any were extracted
if (fileAnnotations.length > 0 && isLastNode && sseStreamer) {
sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
}
// Process template variables in state
if (newState && Object.keys(newState).length > 0) {
for (const key in newState) {
@ -1043,7 +1142,16 @@ class Agent_Agentflow implements INode {
{
role: returnRole,
content: finalResponse,
name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id
name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id,
...(((artifacts && artifacts.length > 0) ||
(fileAnnotations && fileAnnotations.length > 0) ||
(usedTools && usedTools.length > 0)) && {
additional_kwargs: {
...(artifacts && artifacts.length > 0 && { artifacts }),
...(fileAnnotations && fileAnnotations.length > 0 && { fileAnnotations }),
...(usedTools && usedTools.length > 0 && { usedTools })
}
})
}
]
}
@ -1059,6 +1167,105 @@ class Agent_Agentflow implements INode {
}
}
/**
* Extracts built-in used tools from response metadata and processes image generation results
*/
private async extractBuiltInUsedTools(response: AIMessageChunk, builtInUsedTools: IUsedTool[] = []): Promise<IUsedTool[]> {
if (!response.response_metadata) {
return builtInUsedTools
}
const { output, tools } = response.response_metadata
if (!output || !Array.isArray(output) || output.length === 0 || !tools || !Array.isArray(tools) || tools.length === 0) {
return builtInUsedTools
}
for (const outputItem of output) {
if (outputItem.type && outputItem.type.endsWith('_call')) {
let toolInput = outputItem.action ?? outputItem.code
let toolOutput = outputItem.status === 'completed' ? 'Success' : outputItem.status
// Handle image generation calls specially
if (outputItem.type === 'image_generation_call') {
// Create input summary for image generation
toolInput = {
prompt: outputItem.revised_prompt || 'Image generation request',
size: outputItem.size || '1024x1024',
quality: outputItem.quality || 'standard',
output_format: outputItem.output_format || 'png'
}
// Check if image has been processed (base64 replaced with file path)
if (outputItem.result && !outputItem.result.startsWith('data:') && !outputItem.result.includes('base64')) {
toolOutput = `Image generated and saved`
} else {
toolOutput = `Image generated (base64)`
}
}
// Remove "_call" suffix to get the base tool name
const baseToolName = outputItem.type.replace('_call', '')
// Find matching tool that includes the base name in its type
const matchingTool = tools.find((tool) => tool.type && tool.type.includes(baseToolName))
if (matchingTool) {
// Check for duplicates
if (builtInUsedTools.find((tool) => tool.tool === matchingTool.type)) {
continue
}
builtInUsedTools.push({
tool: matchingTool.type,
toolInput,
toolOutput
})
}
}
}
return builtInUsedTools
}
/**
 * Saves base64 image data (from an image_generation_call) to storage and returns file information.
 *
 * @param outputItem - response output item; `result` holds the base64 payload, `output_format`/`id` are optional
 * @param options - runtime options providing orgId, chatflowid and chatId for storage pathing
 * @returns file path, file name and total stored size, or null when there is nothing to save or saving fails
 */
private async saveBase64Image(
    outputItem: any,
    options: ICommonObject
): Promise<{ filePath: string; fileName: string; totalSize: number } | null> {
    try {
        if (!outputItem.result) {
            return null
        }

        // Strip a data-URL prefix (e.g. "data:image/png;base64,") if present before decoding,
        // so Buffer.from does not silently mis-decode the header characters
        const base64Data =
            typeof outputItem.result === 'string' ? outputItem.result.replace(/^data:[^;]+;base64,/, '') : outputItem.result
        const imageBuffer = Buffer.from(base64Data, 'base64')

        // Determine file extension and MIME type (reuse the shared extension -> MIME mapping
        // instead of a png/jpeg-only ternary, for consistency with downloadContainerFile)
        const outputFormat = outputItem.output_format || 'png'
        const fileName = `generated_image_${outputItem.id || Date.now()}.${outputFormat}`
        const mimeType = this.getMimeTypeFromFilename(fileName)

        // Save the image using the existing storage utility
        const { path, totalSize } = await addSingleFileToStorage(
            mimeType,
            imageBuffer,
            fileName,
            options.orgId,
            options.chatflowid,
            options.chatId
        )

        return { filePath: path, fileName, totalSize }
    } catch (error) {
        // Best-effort: a failed save must not break the overall response processing
        console.error('Error saving base64 image:', error)
        return null
    }
}
/**
* Handles memory management based on the specified memory type
*/
@ -1265,7 +1472,8 @@ class Agent_Agentflow implements INode {
sourceDocuments: Array<any>,
artifacts: any[],
additionalTokens: number = 0,
isWaitingForHumanInput: boolean = false
isWaitingForHumanInput: boolean = false,
fileAnnotations: any[] = []
): any {
const output: any = {
content: finalResponse,
@ -1296,6 +1504,10 @@ class Agent_Agentflow implements INode {
}
}
if (response.response_metadata) {
output.responseMetadata = response.response_metadata
}
// Add used tools, source documents and artifacts to output
if (usedTools && usedTools.length > 0) {
output.usedTools = flatten(usedTools)
@ -1317,6 +1529,10 @@ class Agent_Agentflow implements INode {
output.isWaitingForHumanInput = isWaitingForHumanInput
}
if (fileAnnotations && fileAnnotations.length > 0) {
output.fileAnnotations = fileAnnotations
}
return output
}
@ -1808,6 +2024,10 @@ class Agent_Agentflow implements INode {
// Get LLM response after tool calls
let newResponse: AIMessageChunk
if (llmNodeInstance && (llmNodeInstance as any).builtInTools && (llmNodeInstance as any).builtInTools.length > 0) {
toolsInstance.push(...(llmNodeInstance as any).builtInTools)
}
if (llmNodeInstance && toolsInstance.length > 0) {
if (llmNodeInstance.bindTools === undefined) {
throw new Error(`Agent needs to have a function calling capable models.`)
@ -1872,6 +2092,224 @@ class Agent_Agentflow implements INode {
return { response: newResponse, usedTools, sourceDocuments, artifacts, totalTokens, isWaitingForHumanInput }
}
/**
 * Extracts artifacts from response metadata (both annotations and built-in tools).
 *
 * Two sources are scanned in `responseMetadata.output`:
 * - 'message' items whose content carries `container_file_citation` annotations: the
 *   cited container file is downloaded and stored; images become artifacts, other
 *   files become downloadable file annotations.
 * - 'image_generation_call' items: the base64 result is persisted to storage and the
 *   metadata's `result` field is rewritten to the stored file path (intentional mutation).
 *
 * @param responseMetadata - response metadata object expected to carry an `output` array
 * @param modelNodeData - node data used to resolve credentials for container downloads
 * @param options - runtime options (orgId, chatflowid, chatId) for storage pathing
 * @returns collected `artifacts` (renderable) and `fileAnnotations` (downloadable)
 */
private async extractArtifactsFromResponse(
    responseMetadata: any,
    modelNodeData: INodeData,
    options: ICommonObject
): Promise<{ artifacts: any[]; fileAnnotations: any[] }> {
    const artifacts: any[] = []
    const fileAnnotations: any[] = []

    if (!responseMetadata?.output || !Array.isArray(responseMetadata.output)) {
        return { artifacts, fileAnnotations }
    }

    for (const outputItem of responseMetadata.output) {
        // Handle container file citations from annotations
        if (outputItem.type === 'message' && outputItem.content && Array.isArray(outputItem.content)) {
            for (const contentItem of outputItem.content) {
                if (contentItem.annotations && Array.isArray(contentItem.annotations)) {
                    for (const annotation of contentItem.annotations) {
                        if (annotation.type === 'container_file_citation' && annotation.file_id && annotation.filename) {
                            try {
                                // Download and store the file content
                                const downloadResult = await this.downloadContainerFile(
                                    annotation.container_id,
                                    annotation.file_id,
                                    annotation.filename,
                                    modelNodeData,
                                    options
                                )

                                if (downloadResult) {
                                    // Images are surfaced as renderable artifacts; everything else
                                    // becomes a downloadable file annotation
                                    const fileType = this.getArtifactTypeFromFilename(annotation.filename)
                                    if (fileType === 'png' || fileType === 'jpeg' || fileType === 'jpg') {
                                        const artifact = {
                                            type: fileType,
                                            data: downloadResult.filePath
                                        }
                                        artifacts.push(artifact)
                                    } else {
                                        fileAnnotations.push({
                                            filePath: downloadResult.filePath,
                                            fileName: annotation.filename
                                        })
                                    }
                                }
                            } catch (error) {
                                // A failed download skips only this annotation; remaining ones still process
                                console.error('Error processing annotation:', error)
                            }
                        }
                    }
                }
            }
        }

        // Handle built-in tool artifacts (like image generation)
        if (outputItem.type === 'image_generation_call' && outputItem.result) {
            try {
                const savedImageResult = await this.saveBase64Image(outputItem, options)
                if (savedImageResult) {
                    // Replace the base64 result with the file path in the response metadata
                    // (deliberate mutation so downstream consumers see the path, not base64)
                    outputItem.result = savedImageResult.filePath

                    // Create artifact in the same format as other image artifacts
                    const fileType = this.getArtifactTypeFromFilename(savedImageResult.fileName)
                    artifacts.push({
                        type: fileType,
                        data: savedImageResult.filePath
                    })
                }
            } catch (error) {
                // Best-effort: keep processing other output items on failure
                console.error('Error processing image generation artifact:', error)
            }
        }
    }

    return { artifacts, fileAnnotations }
}
/**
 * Downloads the content of a container file citation via the OpenAI Container API
 * and persists it with the shared storage utility.
 *
 * @param containerId - id of the container holding the file
 * @param fileId - id of the file to fetch
 * @param filename - original filename; drives MIME detection and the stored name
 * @param modelNodeData - node data carrying the credential used to resolve the OpenAI API key
 * @param options - runtime options (orgId, chatflowid, chatId) for storage pathing
 * @returns stored file path and size, or null when the key is missing or the download fails
 */
private async downloadContainerFile(
    containerId: string,
    fileId: string,
    filename: string,
    modelNodeData: INodeData,
    options: ICommonObject
): Promise<{ filePath: string; totalSize: number } | null> {
    try {
        const credentialData = await getCredentialData(modelNodeData.credential ?? '', options)
        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, modelNodeData)
        if (!openAIApiKey) {
            console.warn('No OpenAI API key available for downloading container file')
            return null
        }

        // Fetch the raw file content from the OpenAI Container API
        const contentUrl = `https://api.openai.com/v1/containers/${containerId}/files/${fileId}/content`
        const response = await fetch(contentUrl, {
            method: 'GET',
            headers: {
                Accept: '*/*',
                Authorization: `Bearer ${openAIApiKey}`
            }
        })
        if (!response.ok) {
            console.warn(
                `Failed to download container file ${fileId} from container ${containerId}: ${response.status} ${response.statusText}`
            )
            return null
        }

        // Convert the response body into a Buffer for storage
        const dataBuffer = Buffer.from(await response.arrayBuffer())
        const mimeType = this.getMimeTypeFromFilename(filename)

        // Store the file using the same storage utility as OpenAIAssistant
        const stored = await addSingleFileToStorage(mimeType, dataBuffer, filename, options.orgId, options.chatflowid, options.chatId)

        return { filePath: stored.path, totalSize: stored.totalSize }
    } catch (error) {
        console.error('Error downloading container file:', error)
        return null
    }
}
/**
 * Maps a filename extension to its MIME type.
 * Unknown or missing extensions fall back to 'application/octet-stream'.
 */
private getMimeTypeFromFilename(filename: string): string {
    const MIME_BY_EXTENSION: { [key: string]: string } = {
        png: 'image/png',
        jpg: 'image/jpeg',
        jpeg: 'image/jpeg',
        gif: 'image/gif',
        pdf: 'application/pdf',
        txt: 'text/plain',
        csv: 'text/csv',
        json: 'application/json',
        html: 'text/html',
        xml: 'application/xml'
    }
    const ext = filename.toLowerCase().split('.').pop() ?? ''
    return MIME_BY_EXTENSION[ext] ?? 'application/octet-stream'
}
/**
 * Maps a filename extension to the artifact type used for UI rendering.
 * Unknown or missing extensions are rendered as plain 'text'.
 */
private getArtifactTypeFromFilename(filename: string): string {
    const typeByExtension = new Map<string, string>([
        ['png', 'png'],
        ['jpg', 'jpeg'],
        ['jpeg', 'jpeg'],
        ['html', 'html'],
        ['htm', 'html'],
        ['md', 'markdown'],
        ['markdown', 'markdown'],
        ['json', 'json'],
        ['js', 'javascript'],
        ['javascript', 'javascript'],
        ['tex', 'latex'],
        ['latex', 'latex'],
        ['txt', 'text'],
        ['csv', 'text'],
        ['pdf', 'text']
    ])
    const ext = filename.toLowerCase().split('.').pop() ?? ''
    return typeByExtension.get(ext) ?? 'text'
}
/**
 * Processes sandbox links in the response text and converts them to proper download URLs.
 * Example: [Download the script](sandbox:/mnt/data/dummy_bar_graph.py)
 *
 * @param text - response text that may contain sandbox links
 * @param baseURL - server base URL used to build download links
 * @param chatflowId - chatflow id the file belongs to
 * @param chatId - chat session id the file belongs to
 * @returns the text with sandbox links replaced by download URLs (or plain link text on error)
 */
private async processSandboxLinks(text: string, baseURL: string, chatflowId: string, chatId: string): Promise<string> {
    let processedResponse = text

    // Regex to match sandbox links: [text](sandbox:/path/to/file)
    const sandboxLinkRegex = /\[([^\]]+)\]\(sandbox:\/([^)]+)\)/g
    const matches = Array.from(text.matchAll(sandboxLinkRegex))

    for (const match of matches) {
        const fullMatch = match[0]
        const linkText = match[1]
        const filePath = match[2]

        try {
            // Extract filename from the file path
            const fileName = filePath.split('/').pop() || filePath

            // URL-encode all query values so filenames (and ids) containing spaces,
            // '&', '#' or non-ASCII characters still produce a valid download link
            const downloadUrl = `${baseURL}/api/v1/get-upload-file?chatflowId=${encodeURIComponent(
                chatflowId
            )}&chatId=${encodeURIComponent(chatId)}&fileName=${encodeURIComponent(fileName)}&download=true`
            const newLink = `[${linkText}](${downloadUrl})`

            processedResponse = processedResponse.replace(fullMatch, newLink)
        } catch (error) {
            console.error('Error processing sandbox link:', error)
            // If there's an error, remove the sandbox link as fallback
            processedResponse = processedResponse.replace(fullMatch, linkText)
        }
    }

    return processedResponse
}
}
module.exports = { nodeClass: Agent_Agentflow }

View File

@ -313,6 +313,9 @@ export const getPastChatHistoryImageMessages = async (
if (message.additional_kwargs && message.additional_kwargs.fileUploads) {
// example: [{"type":"stored-file","name":"0_DiXc4ZklSTo3M8J4.jpg","mime":"image/jpeg"}]
const fileUploads = message.additional_kwargs.fileUploads
const artifacts = message.additional_kwargs.artifacts
const fileAnnotations = message.additional_kwargs.fileAnnotations
const usedTools = message.additional_kwargs.usedTools
try {
let messageWithFileUploads = ''
const uploads: IFileUpload[] = typeof fileUploads === 'string' ? JSON.parse(fileUploads) : fileUploads
@ -358,22 +361,83 @@ export const getPastChatHistoryImageMessages = async (
}
}
const messageContent = messageWithFileUploads ? `${messageWithFileUploads}\n\n${message.content}` : message.content
const hasArtifacts = artifacts && Array.isArray(artifacts) && artifacts.length > 0
const hasFileAnnotations = fileAnnotations && Array.isArray(fileAnnotations) && fileAnnotations.length > 0
const hasUsedTools = usedTools && Array.isArray(usedTools) && usedTools.length > 0
if (imageContents.length > 0) {
chatHistory.push({
const imageMessage: any = {
role: messageRole,
content: imageContents
})
}
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
imageMessage.additional_kwargs = {}
if (hasArtifacts) imageMessage.additional_kwargs.artifacts = artifacts
if (hasFileAnnotations) imageMessage.additional_kwargs.fileAnnotations = fileAnnotations
if (hasUsedTools) imageMessage.additional_kwargs.usedTools = usedTools
}
chatHistory.push(imageMessage)
transformedPastMessages.push({
role: messageRole,
content: [...JSON.parse((pastChatHistory[i] as any).additional_kwargs.fileUploads)]
})
}
chatHistory.push({
const contentMessage: any = {
role: messageRole,
content: messageContent
})
}
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
contentMessage.additional_kwargs = {}
if (hasArtifacts) contentMessage.additional_kwargs.artifacts = artifacts
if (hasFileAnnotations) contentMessage.additional_kwargs.fileAnnotations = fileAnnotations
if (hasUsedTools) contentMessage.additional_kwargs.usedTools = usedTools
}
chatHistory.push(contentMessage)
} catch (e) {
// failed to parse fileUploads, continue with text only
const hasArtifacts = artifacts && Array.isArray(artifacts) && artifacts.length > 0
const hasFileAnnotations = fileAnnotations && Array.isArray(fileAnnotations) && fileAnnotations.length > 0
const hasUsedTools = usedTools && Array.isArray(usedTools) && usedTools.length > 0
const errorMessage: any = {
role: messageRole,
content: message.content
}
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
errorMessage.additional_kwargs = {}
if (hasArtifacts) errorMessage.additional_kwargs.artifacts = artifacts
if (hasFileAnnotations) errorMessage.additional_kwargs.fileAnnotations = fileAnnotations
if (hasUsedTools) errorMessage.additional_kwargs.usedTools = usedTools
}
chatHistory.push(errorMessage)
}
} else if (message.additional_kwargs) {
const hasArtifacts =
message.additional_kwargs.artifacts &&
Array.isArray(message.additional_kwargs.artifacts) &&
message.additional_kwargs.artifacts.length > 0
const hasFileAnnotations =
message.additional_kwargs.fileAnnotations &&
Array.isArray(message.additional_kwargs.fileAnnotations) &&
message.additional_kwargs.fileAnnotations.length > 0
const hasUsedTools =
message.additional_kwargs.usedTools &&
Array.isArray(message.additional_kwargs.usedTools) &&
message.additional_kwargs.usedTools.length > 0
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
const messageAdditionalKwargs: any = {}
if (hasArtifacts) messageAdditionalKwargs.artifacts = message.additional_kwargs.artifacts
if (hasFileAnnotations) messageAdditionalKwargs.fileAnnotations = message.additional_kwargs.fileAnnotations
if (hasUsedTools) messageAdditionalKwargs.usedTools = message.additional_kwargs.usedTools
chatHistory.push({
role: messageRole,
content: message.content,
additional_kwargs: messageAdditionalKwargs
})
} else {
chatHistory.push({
role: messageRole,
content: message.content

View File

@ -16,6 +16,7 @@ const streamUploadedFile = async (req: Request, res: Response, next: NextFunctio
const chatflowId = req.query.chatflowId as string
const chatId = req.query.chatId as string
const fileName = req.query.fileName as string
const download = req.query.download === 'true' // Check if download parameter is set
const appServer = getRunningExpressApp()
@ -35,7 +36,12 @@ const streamUploadedFile = async (req: Request, res: Response, next: NextFunctio
}
const orgId = workspace.organizationId as string
res.setHeader('Content-Disposition', contentDisposition(fileName))
// Set Content-Disposition header - force attachment for download
if (download) {
res.setHeader('Content-Disposition', contentDisposition(fileName, { type: 'attachment' }))
} else {
res.setHeader('Content-Disposition', contentDisposition(fileName))
}
const fileStream = await streamStorageFile(chatflowId, chatId, fileName, orgId)
if (!fileStream) throw new InternalFlowiseError(StatusCodes.INTERNAL_SERVER_ERROR, `Error: streamStorageFile`)

View File

@ -61,3 +61,13 @@
line-height: 1.6;
margin: 0.5em 0;
}
/* Constrain images rendered inside markdown (e.g. generated-image artifacts)
   so oversized images fit the chat layout instead of overflowing it */
.react-markdown img {
    max-width: 100%;
    max-height: 400px;
    height: auto;
    object-fit: contain;
    border-radius: 8px;
    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
    margin: 10px 0;
}

View File

@ -244,6 +244,63 @@ export const updateOutdatedNodeData = (newComponentNodeData, existingComponentNo
}
}
}
// Handle loadConfig parameters - preserve configuration objects
if (existingComponentNodeData.inputs && initNewComponentNodeData.inputParams) {
// Find parameters with loadConfig: true
const loadConfigParams = initNewComponentNodeData.inputParams.filter((param) => param.loadConfig === true)
for (const param of loadConfigParams) {
const configKey = `${param.name}Config`
// Preserve top-level config objects (e.g., agentModelConfig)
if (existingComponentNodeData.inputs[configKey]) {
initNewComponentNodeData.inputs[configKey] = existingComponentNodeData.inputs[configKey]
}
}
// Handle array parameters that might contain loadConfig items
const arrayParams = initNewComponentNodeData.inputParams.filter((param) => param.type === 'array' && param.array)
for (const arrayParam of arrayParams) {
if (existingComponentNodeData.inputs[arrayParam.name] && Array.isArray(existingComponentNodeData.inputs[arrayParam.name])) {
const existingArray = existingComponentNodeData.inputs[arrayParam.name]
// Find loadConfig parameters within the array definition
const arrayLoadConfigParams = arrayParam.array.filter((subParam) => subParam.loadConfig === true)
if (arrayLoadConfigParams.length > 0) {
// Process each array item to preserve config objects
const updatedArray = existingArray.map((existingItem) => {
if (typeof existingItem === 'object' && existingItem !== null) {
const updatedItem = { ...existingItem }
// Preserve config objects for each loadConfig parameter in the array
for (const loadConfigParam of arrayLoadConfigParams) {
const configKey = `${loadConfigParam.name}Config`
if (existingItem[configKey]) {
updatedItem[configKey] = existingItem[configKey]
}
}
return updatedItem
}
return existingItem
})
initNewComponentNodeData.inputs[arrayParam.name] = updatedArray
}
}
}
// Also preserve any config keys that exist in the existing data but might not be explicitly handled above
// This catches edge cases where config keys exist but don't follow the expected pattern
for (const key in existingComponentNodeData.inputs) {
if (key.endsWith('Config') && !initNewComponentNodeData.inputs[key]) {
initNewComponentNodeData.inputs[key] = existingComponentNodeData.inputs[key]
}
}
}
// Check for tabs
const inputParamsWithTabIdentifiers = initNewComponentNodeData.inputParams.filter((param) => param.tabIdentifier) || []
@ -268,7 +325,7 @@ export const updateOutdatedNodeData = (newComponentNodeData, existingComponentNo
initNewComponentNodeData.label = existingComponentNodeData.label
}
// Special case for Condition node to update outputAnchors
// Special case for Sequential Condition node to update outputAnchors
if (initNewComponentNodeData.name.includes('seqCondition')) {
const options = existingComponentNodeData.outputAnchors[0].options || []

View File

@ -1,6 +1,7 @@
import { useState } from 'react'
import { useSelector } from 'react-redux'
import PropTypes from 'prop-types'
import axios from 'axios'
// MUI
import {
@ -24,7 +25,7 @@ import {
} from '@mui/material'
import { useTheme, darken } from '@mui/material/styles'
import { useSnackbar } from 'notistack'
import { IconCoins, IconClock, IconChevronDown } from '@tabler/icons-react'
import { IconCoins, IconClock, IconChevronDown, IconDownload, IconTool } from '@tabler/icons-react'
import toolSVG from '@/assets/images/tool.svg'
// Project imports
@ -34,6 +35,7 @@ import { AGENTFLOW_ICONS, baseURL } from '@/store/constant'
import { JSONViewer } from '@/ui-component/json/JsonViewer'
import ReactJson from 'flowise-react-json-view'
import { CodeEditor } from '@/ui-component/editor/CodeEditor'
import SourceDocDialog from '@/ui-component/dialog/SourceDocDialog'
import predictionApi from '@/api/prediction'
@ -44,6 +46,8 @@ export const NodeExecutionDetails = ({ data, label, status, metadata, isPublic,
const [feedbackType, setFeedbackType] = useState('')
const [isLoading, setIsLoading] = useState(false)
const [loadingMessage, setLoadingMessage] = useState('')
const [sourceDialogOpen, setSourceDialogOpen] = useState(false)
const [sourceDialogProps, setSourceDialogProps] = useState({})
const customization = useSelector((state) => state.customization)
const theme = useTheme()
const { enqueueSnackbar } = useSnackbar()
@ -160,6 +164,11 @@ export const NodeExecutionDetails = ({ data, label, status, metadata, isPublic,
}
}
// Opens the source-document dialog showing the details of a clicked used tool.
// Param renamed from `data` to `toolData` to avoid shadowing the component's `data` prop.
const onUsedToolClick = (toolData, title) => {
    setSourceDialogProps({ data: toolData, title })
    setSourceDialogOpen(true)
}
const handleSubmitFeedback = () => {
onSubmitResponse(feedbackType, feedback)
setOpenFeedbackDialog(false)
@ -167,6 +176,26 @@ export const NodeExecutionDetails = ({ data, label, status, metadata, isPublic,
setFeedbackType('')
}
// Downloads a file annotation (e.g. a code-interpreter output file) via the
// assistants file download endpoint and triggers a browser download of the blob.
const downloadFile = async (fileAnnotation) => {
    try {
        const response = await axios.post(
            `${baseURL}/api/v1/openai-assistants-file/download`,
            { fileName: fileAnnotation.fileName, chatflowId: metadata?.agentflowId, chatId: metadata?.sessionId },
            { responseType: 'blob' }
        )
        const blob = new Blob([response.data], { type: response.headers['content-type'] })
        const downloadUrl = window.URL.createObjectURL(blob)
        const link = document.createElement('a')
        link.href = downloadUrl
        link.download = fileAnnotation.fileName
        document.body.appendChild(link)
        link.click()
        link.remove()
        // Release the object URL so the blob is not retained for the page lifetime
        window.URL.revokeObjectURL(downloadUrl)
    } catch (error) {
        console.error('Download failed:', error)
    }
}
const renderFullfilledConditions = (conditions) => {
const fullfilledConditions = conditions.filter((condition) => condition.isFulfilled)
return fullfilledConditions.map((condition, index) => {
@ -661,6 +690,35 @@ export const NodeExecutionDetails = ({ data, label, status, metadata, isPublic,
</Typography>
</Box>
)}
{message.additional_kwargs?.usedTools && message.additional_kwargs.usedTools.length > 0 && (
<div
style={{
display: 'block',
flexDirection: 'row',
width: '100%',
marginTop: '10px'
}}
>
{message.additional_kwargs.usedTools.map((tool, index) => {
return tool ? (
<Chip
size='small'
key={index}
label={tool.tool}
sx={{
mr: 1,
mt: 1,
borderColor: tool.error ? 'error.main' : undefined,
color: tool.error ? 'error.main' : undefined
}}
variant='outlined'
icon={<IconTool size={15} color={tool.error ? theme.palette.error.main : undefined} />}
onClick={() => onUsedToolClick(tool, 'Used Tools')}
/>
) : null
})}
</div>
)}
{message.additional_kwargs?.artifacts && message.additional_kwargs.artifacts.length > 0 && (
<Box sx={{ mt: 2, mb: 1 }}>
<Box sx={{ display: 'flex', flexDirection: 'column', gap: 2 }}>
@ -691,7 +749,7 @@ export const NodeExecutionDetails = ({ data, label, status, metadata, isPublic,
)}`
: artifact.data
}
sx={{ height: 'auto', maxHeight: '500px' }}
sx={{ height: 'auto', maxHeight: '500px', objectFit: 'contain' }}
alt={`artifact-${artifactIndex}`}
/>
</Card>
@ -797,6 +855,36 @@ export const NodeExecutionDetails = ({ data, label, status, metadata, isPublic,
return <MemoizedReactMarkdown>{`*No data*`}</MemoizedReactMarkdown>
}
})()}
{message.additional_kwargs?.fileAnnotations && message.additional_kwargs.fileAnnotations.length > 0 && (
<div
style={{
display: 'block',
flexDirection: 'row',
width: '100%',
marginTop: '16px',
marginBottom: '8px'
}}
>
{message.additional_kwargs.fileAnnotations.map((fileAnnotation, index) => {
return (
<Button
sx={{
fontSize: '0.85rem',
textTransform: 'none',
mb: 1,
mr: 1
}}
key={index}
variant='outlined'
onClick={() => downloadFile(fileAnnotation)}
endIcon={<IconDownload color={theme.palette.primary.main} />}
>
{fileAnnotation.fileName}
</Button>
)
})}
</div>
)}
</Box>
))
) : data?.input?.form || data?.input?.http || data?.input?.conditions ? (
@ -862,6 +950,106 @@ export const NodeExecutionDetails = ({ data, label, status, metadata, isPublic,
backgroundColor: theme.palette.background.default
}}
>
{data.output?.usedTools && data.output.usedTools.length > 0 && (
<div
style={{
display: 'block',
flexDirection: 'row',
width: '100%'
}}
>
{data.output.usedTools.map((tool, index) => {
return tool ? (
<Chip
size='small'
key={index}
label={tool.tool}
sx={{
mr: 1,
mt: 1,
borderColor: tool.error ? 'error.main' : undefined,
color: tool.error ? 'error.main' : undefined
}}
variant='outlined'
icon={<IconTool size={15} color={tool.error ? theme.palette.error.main : undefined} />}
onClick={() => onUsedToolClick(tool, 'Used Tools')}
/>
) : null
})}
</div>
)}
{data.output?.artifacts && data.output.artifacts.length > 0 && (
<Box sx={{ mt: 2, mb: 1 }}>
<Box sx={{ display: 'flex', flexDirection: 'column', gap: 2 }}>
{data.output.artifacts.map((artifact, artifactIndex) => {
if (artifact.type === 'png' || artifact.type === 'jpeg' || artifact.type === 'jpg') {
return (
<Card
key={`artifact-${artifactIndex}`}
sx={{
p: 0,
m: 0,
flex: '0 0 auto',
border: 1,
borderColor: 'divider',
borderRadius: 1,
overflow: 'hidden'
}}
>
<CardMedia
component='img'
image={
artifact.data.startsWith('FILE-STORAGE::')
? `${baseURL}/api/v1/get-upload-file?chatflowId=${
metadata?.agentflowId
}&chatId=${metadata?.sessionId}&fileName=${artifact.data.replace(
'FILE-STORAGE::',
''
)}`
: artifact.data
}
sx={{ height: 'auto', maxHeight: '500px', objectFit: 'contain' }}
alt={`artifact-${artifactIndex}`}
/>
</Card>
)
} else if (artifact.type === 'html') {
return (
<Box
key={`artifact-${artifactIndex}`}
sx={{
mt: 1,
border: 1,
borderColor: 'divider',
borderRadius: 1,
p: 2,
backgroundColor: theme.palette.background.paper
}}
>
<SafeHTML html={artifact.data} />
</Box>
)
} else {
return (
<Box
key={`artifact-${artifactIndex}`}
sx={{
mt: 1,
border: 1,
borderColor: 'divider',
borderRadius: 1,
p: 2,
backgroundColor: theme.palette.background.paper
}}
>
<MemoizedReactMarkdown>{artifact.data}</MemoizedReactMarkdown>
</Box>
)
}
})}
</Box>
</Box>
)}
{(() => {
// Check if the content is a stringified JSON or array
if (data?.output?.content) {
@ -882,6 +1070,36 @@ export const NodeExecutionDetails = ({ data, label, status, metadata, isPublic,
return <MemoizedReactMarkdown>{`*No data*`}</MemoizedReactMarkdown>
}
})()}
{data.output?.fileAnnotations && data.output.fileAnnotations.length > 0 && (
<div
style={{
display: 'block',
flexDirection: 'row',
width: '100%',
marginTop: '16px',
marginBottom: '8px'
}}
>
{data.output.fileAnnotations.map((fileAnnotation, index) => {
return (
<Button
sx={{
fontSize: '0.85rem',
textTransform: 'none',
mb: 1,
mr: 1
}}
key={index}
variant='outlined'
onClick={() => downloadFile(fileAnnotation)}
endIcon={<IconDownload color={theme.palette.primary.main} />}
>
{fileAnnotation.fileName}
</Button>
)
})}
</div>
)}
</Box>
)}
{data.error && (
@ -1020,6 +1238,7 @@ export const NodeExecutionDetails = ({ data, label, status, metadata, isPublic,
</Dialog>
</>
)}
<SourceDocDialog show={sourceDialogOpen} dialogProps={sourceDialogProps} onCancel={() => setSourceDialogOpen(false)} />
</Box>
)
}

View File

@ -21,7 +21,10 @@ import {
IconTrash,
IconInfoCircle,
IconLoader,
IconAlertCircleFilled
IconAlertCircleFilled,
IconCode,
IconWorldWww,
IconPhoto
} from '@tabler/icons-react'
import StopCircleIcon from '@mui/icons-material/StopCircle'
import CancelIcon from '@mui/icons-material/Cancel'
@ -126,6 +129,19 @@ const AgentFlowNode = ({ data }) => {
return <foundIcon.icon size={24} color={'white'} />
}
const getBuiltInOpenAIToolIcon = (toolName) => {
switch (toolName) {
case 'web_search_preview':
return <IconWorldWww size={14} color={'white'} />
case 'code_interpreter':
return <IconCode size={14} color={'white'} />
case 'image_generation':
return <IconPhoto size={14} color={'white'} />
default:
return null
}
}
useEffect(() => {
if (ref.current) {
setTimeout(() => {
@ -407,7 +423,17 @@ const AgentFlowNode = ({ data }) => {
: [],
toolProperty: ['selectedTool', 'toolAgentflowSelectedTool']
},
{ tools: data.inputs?.agentKnowledgeVSEmbeddings, toolProperty: ['vectorStore', 'embeddingModel'] }
{ tools: data.inputs?.agentKnowledgeVSEmbeddings, toolProperty: ['vectorStore', 'embeddingModel'] },
{
tools: data.inputs?.agentToolsBuiltInOpenAI
? (typeof data.inputs.agentToolsBuiltInOpenAI === 'string'
? JSON.parse(data.inputs.agentToolsBuiltInOpenAI)
: data.inputs.agentToolsBuiltInOpenAI
).map((tool) => ({ builtInTool: tool }))
: [],
toolProperty: 'builtInTool',
isBuiltInOpenAI: true
}
]
// Filter out undefined tools and render each valid collection
@ -441,6 +467,32 @@ const AgentFlowNode = ({ data }) => {
const toolName = tool[config.toolProperty]
if (!toolName) return []
// Handle built-in OpenAI tools with icons
if (config.isBuiltInOpenAI) {
const icon = getBuiltInOpenAIToolIcon(toolName)
if (!icon) return []
return [
<Box
key={`tool-${configIndex}-${toolIndex}`}
sx={{
width: 20,
height: 20,
borderRadius: '50%',
backgroundColor: customization.isDarkMode
? darken(data.color, 0.5)
: darken(data.color, 0.2),
display: 'flex',
justifyContent: 'center',
alignItems: 'center',
padding: 0.2
}}
>
{icon}
</Box>
]
}
return [
<Box
key={`tool-${configIndex}-${toolIndex}`}