{
|
|
"description": "An agent that can perform question answering over a database",
|
|
"usecases": ["SQL"],
|
|
"nodes": [
|
|
{
|
|
"id": "startAgentflow_0",
|
|
"type": "agentFlow",
|
|
"position": {
|
|
"x": -97,
|
|
"y": 108
|
|
},
|
|
"data": {
|
|
"id": "startAgentflow_0",
|
|
"label": "Start",
|
|
"version": 1.1,
|
|
"name": "startAgentflow",
|
|
"type": "Start",
|
|
"color": "#7EE787",
|
|
"hideInput": true,
|
|
"baseClasses": ["Start"],
|
|
"category": "Agent Flows",
|
|
"description": "Starting point of the agentflow",
|
|
"inputParams": [
|
|
{
|
|
"label": "Input Type",
|
|
"name": "startInputType",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "Chat Input",
|
|
"name": "chatInput",
|
|
"description": "Start the conversation with chat input"
|
|
},
|
|
{
|
|
"label": "Form Input",
|
|
"name": "formInput",
|
|
"description": "Start the workflow with form inputs"
|
|
}
|
|
],
|
|
"default": "chatInput",
|
|
"id": "startAgentflow_0-input-startInputType-options",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Form Title",
|
|
"name": "formTitle",
|
|
"type": "string",
|
|
"placeholder": "Please Fill Out The Form",
|
|
"show": {
|
|
"startInputType": "formInput"
|
|
},
|
|
"id": "startAgentflow_0-input-formTitle-string",
|
|
"display": false
|
|
},
|
|
{
|
|
"label": "Form Description",
|
|
"name": "formDescription",
|
|
"type": "string",
|
|
"placeholder": "Complete all fields below to continue",
|
|
"show": {
|
|
"startInputType": "formInput"
|
|
},
|
|
"id": "startAgentflow_0-input-formDescription-string",
|
|
"display": false
|
|
},
|
|
{
|
|
"label": "Form Input Types",
|
|
"name": "formInputTypes",
|
|
"description": "Specify the type of form input",
|
|
"type": "array",
|
|
"show": {
|
|
"startInputType": "formInput"
|
|
},
|
|
"array": [
|
|
{
|
|
"label": "Type",
|
|
"name": "type",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "String",
|
|
"name": "string"
|
|
},
|
|
{
|
|
"label": "Number",
|
|
"name": "number"
|
|
},
|
|
{
|
|
"label": "Boolean",
|
|
"name": "boolean"
|
|
},
|
|
{
|
|
"label": "Options",
|
|
"name": "options"
|
|
}
|
|
],
|
|
"default": "string"
|
|
},
|
|
{
|
|
"label": "Label",
|
|
"name": "label",
|
|
"type": "string",
|
|
"placeholder": "Label for the input"
|
|
},
|
|
{
|
|
"label": "Variable Name",
|
|
"name": "name",
|
|
"type": "string",
|
|
"placeholder": "Variable name for the input (must be camel case)",
|
|
"description": "Variable name must be camel case. For example: firstName, lastName, etc."
|
|
},
|
|
{
|
|
"label": "Add Options",
|
|
"name": "addOptions",
|
|
"type": "array",
|
|
"show": {
|
|
"formInputTypes[$index].type": "options"
|
|
},
|
|
"array": [
|
|
{
|
|
"label": "Option",
|
|
"name": "option",
|
|
"type": "string"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"id": "startAgentflow_0-input-formInputTypes-array",
|
|
"display": false
|
|
},
|
|
{
|
|
"label": "Ephemeral Memory",
|
|
"name": "startEphemeralMemory",
|
|
"type": "boolean",
|
|
"description": "Start fresh for every execution without past chat history",
|
|
"optional": true,
|
|
"id": "startAgentflow_0-input-startEphemeralMemory-boolean",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Flow State",
|
|
"name": "startState",
|
|
"description": "Runtime state during the execution of the workflow",
|
|
"type": "array",
|
|
"optional": true,
|
|
"array": [
|
|
{
|
|
"label": "Key",
|
|
"name": "key",
|
|
"type": "string",
|
|
"placeholder": "Foo"
|
|
},
|
|
{
|
|
"label": "Value",
|
|
"name": "value",
|
|
"type": "string",
|
|
"placeholder": "Bar",
|
|
"optional": true
|
|
}
|
|
],
|
|
"id": "startAgentflow_0-input-startState-array",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Persist State",
|
|
"name": "startPersistState",
|
|
"type": "boolean",
|
|
"description": "Persist the state in the same session",
|
|
"optional": true,
|
|
"id": "startAgentflow_0-input-startPersistState-boolean",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"startInputType": "chatInput",
|
|
"formTitle": "",
|
|
"formDescription": "",
|
|
"formInputTypes": "",
|
|
"startEphemeralMemory": "",
|
|
"startState": [
|
|
{
|
|
"key": "sqlQuery",
|
|
"value": ""
|
|
}
|
|
],
|
|
"startPersistState": ""
|
|
},
|
|
"outputAnchors": [
|
|
{
|
|
"id": "startAgentflow_0-output-startAgentflow",
|
|
"label": "Start",
|
|
"name": "startAgentflow"
|
|
}
|
|
],
|
|
"outputs": {},
|
|
"selected": false
|
|
},
|
|
"width": 103,
|
|
"height": 66,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": -97,
|
|
"y": 108
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "customFunctionAgentflow_0",
|
|
"position": {
|
|
"x": 58.5,
|
|
"y": 109
|
|
},
|
|
"data": {
|
|
"id": "customFunctionAgentflow_0",
|
|
"label": "Get DB Schema",
|
|
"version": 1,
|
|
"name": "customFunctionAgentflow",
|
|
"type": "CustomFunction",
|
|
"color": "#E4B7FF",
|
|
"baseClasses": ["CustomFunction"],
|
|
"category": "Agent Flows",
|
|
"description": "Execute custom function",
|
|
"inputParams": [
|
|
{
|
|
"label": "Input Variables",
|
|
"name": "customFunctionInputVariables",
|
|
"description": "Input variables can be used in the function with prefix $. For example: $foo",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Variable Name",
|
|
"name": "variableName",
|
|
"type": "string"
|
|
},
|
|
{
|
|
"label": "Variable Value",
|
|
"name": "variableValue",
|
|
"type": "string",
|
|
"acceptVariable": true
|
|
}
|
|
],
|
|
"id": "customFunctionAgentflow_0-input-customFunctionInputVariables-array",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Javascript Function",
|
|
"name": "customFunctionJavascriptFunction",
|
|
"type": "code",
|
|
"codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Schema as variables. Ex: Property = userid, Variable = $userid\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get custom variables: $vars.<variable-name>\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}",
|
|
"description": "The function to execute. Must return a string or an object that can be converted to a string.",
|
|
"id": "customFunctionAgentflow_0-input-customFunctionJavascriptFunction-code",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Update Flow State",
|
|
"name": "customFunctionUpdateState",
|
|
"description": "Update runtime state during the execution of the workflow",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Key",
|
|
"name": "key",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listRuntimeStateKeys",
|
|
"freeSolo": true
|
|
},
|
|
{
|
|
"label": "Value",
|
|
"name": "value",
|
|
"type": "string",
|
|
"acceptVariable": true,
|
|
"acceptNodeOutputAsVariable": true
|
|
}
|
|
],
|
|
"id": "customFunctionAgentflow_0-input-customFunctionUpdateState-array",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"customFunctionInputVariables": "",
|
|
"customFunctionJavascriptFunction": "const { DataSource } = require('typeorm');\nconst { Pool } = require('pg');\n\nconst HOST = 'localhost';\nconst USER = 'testuser';\nconst PASSWORD = 'testpwd';\nconst DATABASE = 'abudhabi';\nconst PORT = 5555;\n\nlet sqlSchemaPrompt = '';\n\nconst AppDataSource = new DataSource({\n type: 'postgres',\n host: HOST,\n port: PORT,\n username: USER,\n password: PASSWORD,\n database: DATABASE,\n synchronize: false,\n logging: false,\n});\n\nasync function getSQLPrompt() {\n try {\n await AppDataSource.initialize();\n const queryRunner = AppDataSource.createQueryRunner();\n\n // Get all user-defined tables (excluding system tables)\n const tablesResult = await queryRunner.query(`\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = 'public' AND table_type = 'BASE TABLE'\n `);\n\n for (const tableRow of tablesResult) {\n const tableName = tableRow.table_name;\n\n const schemaInfo = await queryRunner.query(`\n SELECT column_name, data_type, is_nullable\n FROM information_schema.columns\n WHERE table_name = '${tableName}'\n `);\n\n const createColumns = [];\n const columnNames = [];\n\n for (const column of schemaInfo) {\n const name = column.column_name;\n const type = column.data_type.toUpperCase();\n const notNull = column.is_nullable === 'NO' ? 'NOT NULL' : '';\n columnNames.push(name);\n createColumns.push(`${name} ${type} ${notNull}`);\n }\n\n const sqlCreateTableQuery = `CREATE TABLE ${tableName} (${createColumns.join(', ')})`;\n const sqlSelectTableQuery = `SELECT * FROM ${tableName} LIMIT 3`;\n\n let allValues = [];\n try {\n const rows = await queryRunner.query(sqlSelectTableQuery);\n\n allValues = rows.map(row =>\n columnNames.map(col => row[col]).join(' ')\n );\n } catch (err) {\n allValues.push('[ERROR FETCHING ROWS]');\n }\n\n sqlSchemaPrompt +=\n sqlCreateTableQuery +\n '\\n' +\n sqlSelectTableQuery +\n '\\n' +\n columnNames.join(' ') +\n '\\n' +\n allValues.join('\\n') +\n '\\n\\n';\n }\n\n await queryRunner.release();\n } catch (err) {\n console.error(err);\n throw err;\n }\n}\n\nasync function main() {\n await getSQLPrompt();\n}\n\nawait main();\n\nreturn sqlSchemaPrompt;\n",
|
|
"customFunctionUpdateState": ""
|
|
},
|
|
"outputAnchors": [
|
|
{
|
|
"id": "customFunctionAgentflow_0-output-customFunctionAgentflow",
|
|
"label": "Custom Function",
|
|
"name": "customFunctionAgentflow"
|
|
}
|
|
],
|
|
"outputs": {},
|
|
"selected": false
|
|
},
|
|
"type": "agentFlow",
|
|
"width": 173,
|
|
"height": 66,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 58.5,
|
|
"y": 109
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "llmAgentflow_0",
|
|
"position": {
|
|
"x": 272.7184381707814,
|
|
"y": 106.61165168988839
|
|
},
|
|
"data": {
|
|
"id": "llmAgentflow_0",
|
|
"label": "Generate SQL Query",
|
|
"version": 1,
|
|
"name": "llmAgentflow",
|
|
"type": "LLM",
|
|
"color": "#64B5F6",
|
|
"baseClasses": ["LLM"],
|
|
"category": "Agent Flows",
|
|
"description": "Large language models to analyze user-provided inputs and generate responses",
|
|
"inputParams": [
|
|
{
|
|
"label": "Model",
|
|
"name": "llmModel",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listModels",
|
|
"loadConfig": true,
|
|
"id": "llmAgentflow_0-input-llmModel-asyncOptions",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Messages",
|
|
"name": "llmMessages",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Role",
|
|
"name": "role",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "System",
|
|
"name": "system"
|
|
},
|
|
{
|
|
"label": "Assistant",
|
|
"name": "assistant"
|
|
},
|
|
{
|
|
"label": "Developer",
|
|
"name": "developer"
|
|
},
|
|
{
|
|
"label": "User",
|
|
"name": "user"
|
|
}
|
|
]
|
|
},
|
|
{
|
|
"label": "Content",
|
|
"name": "content",
|
|
"type": "string",
|
|
"acceptVariable": true,
|
|
"generateInstruction": true,
|
|
"rows": 4
|
|
}
|
|
],
|
|
"id": "llmAgentflow_0-input-llmMessages-array",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Enable Memory",
|
|
"name": "llmEnableMemory",
|
|
"type": "boolean",
|
|
"description": "Enable memory for the conversation thread",
|
|
"default": true,
|
|
"optional": true,
|
|
"id": "llmAgentflow_0-input-llmEnableMemory-boolean",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Memory Type",
|
|
"name": "llmMemoryType",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "All Messages",
|
|
"name": "allMessages",
|
|
"description": "Retrieve all messages from the conversation"
|
|
},
|
|
{
|
|
"label": "Window Size",
|
|
"name": "windowSize",
|
|
"description": "Uses a fixed window size to surface the last N messages"
|
|
},
|
|
{
|
|
"label": "Conversation Summary",
|
|
"name": "conversationSummary",
|
|
"description": "Summarizes the whole conversation"
|
|
},
|
|
{
|
|
"label": "Conversation Summary Buffer",
|
|
"name": "conversationSummaryBuffer",
|
|
"description": "Summarize conversations once token limit is reached. Default to 2000"
|
|
}
|
|
],
|
|
"optional": true,
|
|
"default": "allMessages",
|
|
"show": {
|
|
"llmEnableMemory": true
|
|
},
|
|
"id": "llmAgentflow_0-input-llmMemoryType-options",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Window Size",
|
|
"name": "llmMemoryWindowSize",
|
|
"type": "number",
|
|
"default": "20",
|
|
"description": "Uses a fixed window size to surface the last N messages",
|
|
"show": {
|
|
"llmMemoryType": "windowSize"
|
|
},
|
|
"id": "llmAgentflow_0-input-llmMemoryWindowSize-number",
|
|
"display": false
|
|
},
|
|
{
|
|
"label": "Max Token Limit",
|
|
"name": "llmMemoryMaxTokenLimit",
|
|
"type": "number",
|
|
"default": "2000",
|
|
"description": "Summarize conversations once token limit is reached. Default to 2000",
|
|
"show": {
|
|
"llmMemoryType": "conversationSummaryBuffer"
|
|
},
|
|
"id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number",
|
|
"display": false
|
|
},
|
|
{
|
|
"label": "Input Message",
|
|
"name": "llmUserMessage",
|
|
"type": "string",
|
|
"description": "Add an input message as user message at the end of the conversation",
|
|
"rows": 4,
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"show": {
|
|
"llmEnableMemory": true
|
|
},
|
|
"id": "llmAgentflow_0-input-llmUserMessage-string",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Return Response As",
|
|
"name": "llmReturnResponseAs",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "User Message",
|
|
"name": "userMessage"
|
|
},
|
|
{
|
|
"label": "Assistant Message",
|
|
"name": "assistantMessage"
|
|
}
|
|
],
|
|
"default": "userMessage",
|
|
"id": "llmAgentflow_0-input-llmReturnResponseAs-options",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "JSON Structured Output",
|
|
"name": "llmStructuredOutput",
|
|
"description": "Instruct the LLM to give output in a JSON structured schema",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Key",
|
|
"name": "key",
|
|
"type": "string"
|
|
},
|
|
{
|
|
"label": "Type",
|
|
"name": "type",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "String",
|
|
"name": "string"
|
|
},
|
|
{
|
|
"label": "String Array",
|
|
"name": "stringArray"
|
|
},
|
|
{
|
|
"label": "Number",
|
|
"name": "number"
|
|
},
|
|
{
|
|
"label": "Boolean",
|
|
"name": "boolean"
|
|
},
|
|
{
|
|
"label": "Enum",
|
|
"name": "enum"
|
|
},
|
|
{
|
|
"label": "JSON Array",
|
|
"name": "jsonArray"
|
|
}
|
|
]
|
|
},
|
|
{
|
|
"label": "Enum Values",
|
|
"name": "enumValues",
|
|
"type": "string",
|
|
"placeholder": "value1, value2, value3",
|
|
"description": "Enum values. Separated by comma",
|
|
"optional": true,
|
|
"show": {
|
|
"llmStructuredOutput[$index].type": "enum"
|
|
}
|
|
},
|
|
{
|
|
"label": "JSON Schema",
|
|
"name": "jsonSchema",
|
|
"type": "code",
|
|
"placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}",
|
|
"description": "JSON schema for the structured output",
|
|
"optional": true,
|
|
"show": {
|
|
"llmStructuredOutput[$index].type": "jsonArray"
|
|
}
|
|
},
|
|
{
|
|
"label": "Description",
|
|
"name": "description",
|
|
"type": "string",
|
|
"placeholder": "Description of the key"
|
|
}
|
|
],
|
|
"id": "llmAgentflow_0-input-llmStructuredOutput-array",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Update Flow State",
|
|
"name": "llmUpdateState",
|
|
"description": "Update runtime state during the execution of the workflow",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Key",
|
|
"name": "key",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listRuntimeStateKeys",
|
|
"freeSolo": true
|
|
},
|
|
{
|
|
"label": "Value",
|
|
"name": "value",
|
|
"type": "string",
|
|
"acceptVariable": true,
|
|
"acceptNodeOutputAsVariable": true
|
|
}
|
|
],
|
|
"id": "llmAgentflow_0-input-llmUpdateState-array",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"llmModel": "chatAnthropic",
|
|
"llmMessages": [
|
|
{
|
|
"role": "system",
|
|
"content": "<p>You are an agent designed to interact with a SQL database. Given an input question, create a syntactically correct sqlite query to run, then look at the results of the query and return the answer. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most 5 results. You can order the results by a relevant column to return the most interesting examples in the database. Never query for all the columns from a specific table, only ask for the relevant columns given the question. DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.</p><p>Here is the relevant table info:</p><p><span class=\"variable\" data-type=\"mention\" data-id=\"customFunctionAgentflow_0\" data-label=\"customFunctionAgentflow_0\">{{ customFunctionAgentflow_0 }}</span></p><p>Note:</p><ul><li><p> Only generate ONE SQL query</p></li></ul><p></p>"
|
|
}
|
|
],
|
|
"llmEnableMemory": true,
|
|
"llmMemoryType": "allMessages",
|
|
"llmUserMessage": "",
|
|
"llmReturnResponseAs": "userMessage",
|
|
"llmStructuredOutput": [
|
|
{
|
|
"key": "sql_query",
|
|
"type": "string",
|
|
"enumValues": "",
|
|
"jsonSchema": "",
|
|
"description": "SQL query"
|
|
}
|
|
],
|
|
"llmUpdateState": [
|
|
{
|
|
"key": "sqlQuery",
|
|
"value": "<p><span class=\"variable\" data-type=\"mention\" data-id=\"output.sql_query\" data-label=\"output.sql_query\">{{ output.sql_query }}</span> </p>"
|
|
}
|
|
],
|
|
"llmModelConfig": {
|
|
"credential": "",
|
|
"modelName": "claude-sonnet-4-0",
|
|
"temperature": 0.9,
|
|
"streaming": true,
|
|
"maxTokensToSample": "",
|
|
"topP": "",
|
|
"topK": "",
|
|
"extendedThinking": "",
|
|
"budgetTokens": 1024,
|
|
"allowImageUploads": "",
|
|
"llmModel": "chatAnthropic"
|
|
}
|
|
},
|
|
"outputAnchors": [
|
|
{
|
|
"id": "llmAgentflow_0-output-llmAgentflow",
|
|
"label": "LLM",
|
|
"name": "llmAgentflow"
|
|
}
|
|
],
|
|
"outputs": {},
|
|
"selected": false
|
|
},
|
|
"type": "agentFlow",
|
|
"width": 213,
|
|
"height": 72,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 272.7184381707814,
|
|
"y": 106.61165168988839
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "conditionAgentAgentflow_0",
|
|
"position": {
|
|
"x": 511.16504493033483,
|
|
"y": 101.98220225318451
|
|
},
|
|
"data": {
|
|
"id": "conditionAgentAgentflow_0",
|
|
"label": "Check SQL Query",
|
|
"version": 1,
|
|
"name": "conditionAgentAgentflow",
|
|
"type": "ConditionAgent",
|
|
"color": "#ff8fab",
|
|
"baseClasses": ["ConditionAgent"],
|
|
"category": "Agent Flows",
|
|
"description": "Utilize an agent to split flows based on dynamic conditions",
|
|
"inputParams": [
|
|
{
|
|
"label": "Model",
|
|
"name": "conditionAgentModel",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listModels",
|
|
"loadConfig": true,
|
|
"id": "conditionAgentAgentflow_0-input-conditionAgentModel-asyncOptions",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Instructions",
|
|
"name": "conditionAgentInstructions",
|
|
"type": "string",
|
|
"description": "A general instructions of what the condition agent should do",
|
|
"rows": 4,
|
|
"acceptVariable": true,
|
|
"placeholder": "Determine if the user is interested in learning about AI",
|
|
"id": "conditionAgentAgentflow_0-input-conditionAgentInstructions-string",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Input",
|
|
"name": "conditionAgentInput",
|
|
"type": "string",
|
|
"description": "Input to be used for the condition agent",
|
|
"rows": 4,
|
|
"acceptVariable": true,
|
|
"default": "<p><span class=\"variable\" data-type=\"mention\" data-id=\"question\" data-label=\"question\">{{ question }}</span> </p>",
|
|
"id": "conditionAgentAgentflow_0-input-conditionAgentInput-string",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Scenarios",
|
|
"name": "conditionAgentScenarios",
|
|
"description": "Define the scenarios that will be used as the conditions to split the flow",
|
|
"type": "array",
|
|
"array": [
|
|
{
|
|
"label": "Scenario",
|
|
"name": "scenario",
|
|
"type": "string",
|
|
"placeholder": "User is asking for a pizza"
|
|
}
|
|
],
|
|
"default": [
|
|
{
|
|
"scenario": "SQL query is correct and does not contains mistakes"
|
|
},
|
|
{
|
|
"scenario": "SQL query contains mistakes"
|
|
}
|
|
],
|
|
"id": "conditionAgentAgentflow_0-input-conditionAgentScenarios-array",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"conditionAgentModel": "chatOpenAI",
|
|
"conditionAgentInstructions": "<p>You are a SQL expert with a strong attention to detail. Double check the SQL query for common mistakes, including:</p><p>- Using NOT IN with NULL values</p><p>- Using UNION when UNION ALL should have been used</p><p>- Using BETWEEN for exclusive ranges</p><p>- Data type mismatch in predicates</p><p>- Properly quoting identifiers</p><p>- Using the correct number of arguments for functions</p><p>- Casting to the correct data type</p><p>- Using the proper columns for joins</p>",
|
|
"conditionAgentInput": "<p><span class=\"variable\" data-type=\"mention\" data-id=\"$flow.state.sqlQuery\" data-label=\"$flow.state.sqlQuery\">{{ $flow.state.sqlQuery }}</span> </p>",
|
|
"conditionAgentScenarios": [
|
|
{
|
|
"scenario": "SQL query is correct and does not contains mistakes"
|
|
},
|
|
{
|
|
"scenario": "SQL query contains mistakes"
|
|
}
|
|
],
|
|
"conditionAgentModelConfig": {
|
|
"credential": "",
|
|
"modelName": "gpt-4o-mini",
|
|
"temperature": 0.9,
|
|
"streaming": true,
|
|
"maxTokens": "",
|
|
"topP": "",
|
|
"frequencyPenalty": "",
|
|
"presencePenalty": "",
|
|
"timeout": "",
|
|
"strictToolCalling": "",
|
|
"stopSequence": "",
|
|
"basepath": "",
|
|
"proxyUrl": "",
|
|
"baseOptions": "",
|
|
"allowImageUploads": "",
|
|
"imageResolution": "low",
|
|
"reasoningEffort": "",
|
|
"conditionAgentModel": "chatOpenAI"
|
|
}
|
|
},
|
|
"outputAnchors": [
|
|
{
|
|
"id": "conditionAgentAgentflow_0-output-0",
|
|
"label": "Condition Agent",
|
|
"name": "conditionAgentAgentflow"
|
|
},
|
|
{
|
|
"id": "conditionAgentAgentflow_0-output-1",
|
|
"label": "Condition Agent",
|
|
"name": "conditionAgentAgentflow"
|
|
}
|
|
],
|
|
"outputs": {
|
|
"conditionAgentAgentflow": ""
|
|
},
|
|
"selected": false
|
|
},
|
|
"type": "agentFlow",
|
|
"width": 187,
|
|
"height": 80,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 511.16504493033483,
|
|
"y": 101.98220225318451
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "loopAgentflow_0",
|
|
"position": {
|
|
"x": 762.44734302386,
|
|
"y": 182.95996068910745
|
|
},
|
|
"data": {
|
|
"id": "loopAgentflow_0",
|
|
"label": "Regenerate Query",
|
|
"version": 1,
|
|
"name": "loopAgentflow",
|
|
"type": "Loop",
|
|
"color": "#FFA07A",
|
|
"hideOutput": true,
|
|
"baseClasses": ["Loop"],
|
|
"category": "Agent Flows",
|
|
"description": "Loop back to a previous node",
|
|
"inputParams": [
|
|
{
|
|
"label": "Loop Back To",
|
|
"name": "loopBackToNode",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listPreviousNodes",
|
|
"freeSolo": true,
|
|
"id": "loopAgentflow_0-input-loopBackToNode-asyncOptions",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Max Loop Count",
|
|
"name": "maxLoopCount",
|
|
"type": "number",
|
|
"default": 5,
|
|
"id": "loopAgentflow_0-input-maxLoopCount-number",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"loopBackToNode": "llmAgentflow_0-Generate SQL Query",
|
|
"maxLoopCount": 5
|
|
},
|
|
"outputAnchors": [],
|
|
"outputs": {},
|
|
"selected": false
|
|
},
|
|
"type": "agentFlow",
|
|
"width": 190,
|
|
"height": 66,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 762.44734302386,
|
|
"y": 182.95996068910745
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "customFunctionAgentflow_1",
|
|
"position": {
|
|
"x": 761.3261621815544,
|
|
"y": 44.65096212173265
|
|
},
|
|
"data": {
|
|
"id": "customFunctionAgentflow_1",
|
|
"label": "Run SQL Query",
|
|
"version": 1,
|
|
"name": "customFunctionAgentflow",
|
|
"type": "CustomFunction",
|
|
"color": "#E4B7FF",
|
|
"baseClasses": ["CustomFunction"],
|
|
"category": "Agent Flows",
|
|
"description": "Execute custom function",
|
|
"inputParams": [
|
|
{
|
|
"label": "Input Variables",
|
|
"name": "customFunctionInputVariables",
|
|
"description": "Input variables can be used in the function with prefix $. For example: $foo",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Variable Name",
|
|
"name": "variableName",
|
|
"type": "string"
|
|
},
|
|
{
|
|
"label": "Variable Value",
|
|
"name": "variableValue",
|
|
"type": "string",
|
|
"acceptVariable": true
|
|
}
|
|
],
|
|
"id": "customFunctionAgentflow_1-input-customFunctionInputVariables-array",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Javascript Function",
|
|
"name": "customFunctionJavascriptFunction",
|
|
"type": "code",
|
|
"codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Schema as variables. Ex: Property = userid, Variable = $userid\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get custom variables: $vars.<variable-name>\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}",
|
|
"description": "The function to execute. Must return a string or an object that can be converted to a string.",
|
|
"id": "customFunctionAgentflow_1-input-customFunctionJavascriptFunction-code",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Update Flow State",
|
|
"name": "customFunctionUpdateState",
|
|
"description": "Update runtime state during the execution of the workflow",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Key",
|
|
"name": "key",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listRuntimeStateKeys",
|
|
"freeSolo": true
|
|
},
|
|
{
|
|
"label": "Value",
|
|
"name": "value",
|
|
"type": "string",
|
|
"acceptVariable": true,
|
|
"acceptNodeOutputAsVariable": true
|
|
}
|
|
],
|
|
"id": "customFunctionAgentflow_1-input-customFunctionUpdateState-array",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"customFunctionInputVariables": [
|
|
{
|
|
"variableName": "sqlQuery",
|
|
"variableValue": "<p><span class=\"variable\" data-type=\"mention\" data-id=\"$flow.state.sqlQuery\" data-label=\"$flow.state.sqlQuery\">{{ $flow.state.sqlQuery }}</span> </p>"
|
|
}
|
|
],
|
|
"customFunctionJavascriptFunction": "const { DataSource } = require('typeorm');\nconst { Pool } = require('pg');\n\n// Configuration\nconst HOST = 'localhost';\nconst USER = 'testuser';\nconst PASSWORD = 'testpwd';\nconst DATABASE = 'abudhabi';\nconst PORT = 5555;\n\nconst sqlQuery = $sqlQuery;\n\nconst AppDataSource = new DataSource({\n type: 'postgres',\n host: HOST,\n port: PORT,\n username: USER,\n password: PASSWORD,\n database: DATABASE,\n synchronize: false,\n logging: false,\n});\n\nlet formattedResult = '';\n\nasync function runSQLQuery(query) {\n try {\n await AppDataSource.initialize();\n const queryRunner = AppDataSource.createQueryRunner();\n\n const rows = await queryRunner.query(query);\n console.log('rows =', rows);\n\n if (rows.length === 0) {\n formattedResult = '[No results returned]';\n } else {\n const columnNames = Object.keys(rows[0]);\n const header = columnNames.join(' ');\n const values = rows.map(row =>\n columnNames.map(col => row[col]).join(' ')\n );\n\n formattedResult = query + '\\n' + header + '\\n' + values.join('\\n');\n }\n\n await queryRunner.release();\n } catch (err) {\n console.error('[ERROR]', err);\n formattedResult = `[Error executing query]: ${err}`;\n }\n\n return formattedResult;\n}\n\nasync function main() {\n formattedResult = await runSQLQuery(sqlQuery);\n}\n\nawait main();\n\nreturn formattedResult;\n",
|
|
"customFunctionUpdateState": ""
|
|
},
|
|
"outputAnchors": [
|
|
{
|
|
"id": "customFunctionAgentflow_1-output-customFunctionAgentflow",
|
|
"label": "Custom Function",
|
|
"name": "customFunctionAgentflow"
|
|
}
|
|
],
|
|
"outputs": {},
|
|
"selected": false
|
|
},
|
|
"type": "agentFlow",
|
|
"width": 171,
|
|
"height": 66,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 761.3261621815544,
|
|
"y": 44.65096212173265
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "llmAgentflow_1",
|
|
"position": {
|
|
"x": 1238.7660285501179,
|
|
"y": 20.56658816269558
|
|
},
|
|
"data": {
|
|
"id": "llmAgentflow_1",
|
|
"label": "Return Response",
|
|
"version": 1,
|
|
"name": "llmAgentflow",
|
|
"type": "LLM",
|
|
"color": "#64B5F6",
|
|
"baseClasses": ["LLM"],
|
|
"category": "Agent Flows",
|
|
"description": "Large language models to analyze user-provided inputs and generate responses",
|
|
"inputParams": [
|
|
{
|
|
"label": "Model",
|
|
"name": "llmModel",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listModels",
|
|
"loadConfig": true,
|
|
"id": "llmAgentflow_1-input-llmModel-asyncOptions",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Messages",
|
|
"name": "llmMessages",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Role",
|
|
"name": "role",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "System",
|
|
"name": "system"
|
|
},
|
|
{
|
|
"label": "Assistant",
|
|
"name": "assistant"
|
|
},
|
|
{
|
|
"label": "Developer",
|
|
"name": "developer"
|
|
},
|
|
{
|
|
"label": "User",
|
|
"name": "user"
|
|
}
|
|
]
|
|
},
|
|
{
|
|
"label": "Content",
|
|
"name": "content",
|
|
"type": "string",
|
|
"acceptVariable": true,
|
|
"generateInstruction": true,
|
|
"rows": 4
|
|
}
|
|
],
|
|
"id": "llmAgentflow_1-input-llmMessages-array",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Enable Memory",
|
|
"name": "llmEnableMemory",
|
|
"type": "boolean",
|
|
"description": "Enable memory for the conversation thread",
|
|
"default": true,
|
|
"optional": true,
|
|
"id": "llmAgentflow_1-input-llmEnableMemory-boolean",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Memory Type",
|
|
"name": "llmMemoryType",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "All Messages",
|
|
"name": "allMessages",
|
|
"description": "Retrieve all messages from the conversation"
|
|
},
|
|
{
|
|
"label": "Window Size",
|
|
"name": "windowSize",
|
|
"description": "Uses a fixed window size to surface the last N messages"
|
|
},
|
|
{
|
|
"label": "Conversation Summary",
|
|
"name": "conversationSummary",
|
|
"description": "Summarizes the whole conversation"
|
|
},
|
|
{
|
|
"label": "Conversation Summary Buffer",
|
|
"name": "conversationSummaryBuffer",
|
|
"description": "Summarize conversations once token limit is reached. Default to 2000"
|
|
}
|
|
],
|
|
"optional": true,
|
|
"default": "allMessages",
|
|
"show": {
|
|
"llmEnableMemory": true
|
|
},
|
|
"id": "llmAgentflow_1-input-llmMemoryType-options",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Window Size",
|
|
"name": "llmMemoryWindowSize",
|
|
"type": "number",
|
|
"default": "20",
|
|
"description": "Uses a fixed window size to surface the last N messages",
|
|
"show": {
|
|
"llmMemoryType": "windowSize"
|
|
},
|
|
"id": "llmAgentflow_1-input-llmMemoryWindowSize-number",
|
|
"display": false
|
|
},
|
|
{
|
|
"label": "Max Token Limit",
|
|
"name": "llmMemoryMaxTokenLimit",
|
|
"type": "number",
|
|
"default": "2000",
|
|
"description": "Summarize conversations once token limit is reached. Default to 2000",
|
|
"show": {
|
|
"llmMemoryType": "conversationSummaryBuffer"
|
|
},
|
|
"id": "llmAgentflow_1-input-llmMemoryMaxTokenLimit-number",
|
|
"display": false
|
|
},
|
|
{
|
|
"label": "Input Message",
|
|
"name": "llmUserMessage",
|
|
"type": "string",
|
|
"description": "Add an input message as user message at the end of the conversation",
|
|
"rows": 4,
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"show": {
|
|
"llmEnableMemory": true
|
|
},
|
|
"id": "llmAgentflow_1-input-llmUserMessage-string",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Return Response As",
|
|
"name": "llmReturnResponseAs",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "User Message",
|
|
"name": "userMessage"
|
|
},
|
|
{
|
|
"label": "Assistant Message",
|
|
"name": "assistantMessage"
|
|
}
|
|
],
|
|
"default": "userMessage",
|
|
"id": "llmAgentflow_1-input-llmReturnResponseAs-options",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "JSON Structured Output",
|
|
"name": "llmStructuredOutput",
|
|
"description": "Instruct the LLM to give output in a JSON structured schema",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Key",
|
|
"name": "key",
|
|
"type": "string"
|
|
},
|
|
{
|
|
"label": "Type",
|
|
"name": "type",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "String",
|
|
"name": "string"
|
|
},
|
|
{
|
|
"label": "String Array",
|
|
"name": "stringArray"
|
|
},
|
|
{
|
|
"label": "Number",
|
|
"name": "number"
|
|
},
|
|
{
|
|
"label": "Boolean",
|
|
"name": "boolean"
|
|
},
|
|
{
|
|
"label": "Enum",
|
|
"name": "enum"
|
|
},
|
|
{
|
|
"label": "JSON Array",
|
|
"name": "jsonArray"
|
|
}
|
|
]
|
|
},
|
|
{
|
|
"label": "Enum Values",
|
|
"name": "enumValues",
|
|
"type": "string",
|
|
"placeholder": "value1, value2, value3",
|
|
"description": "Enum values. Separated by comma",
|
|
"optional": true,
|
|
"show": {
|
|
"llmStructuredOutput[$index].type": "enum"
|
|
}
|
|
},
|
|
{
|
|
"label": "JSON Schema",
|
|
"name": "jsonSchema",
|
|
"type": "code",
|
|
"placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}",
|
|
"description": "JSON schema for the structured output",
|
|
"optional": true,
|
|
"show": {
|
|
"llmStructuredOutput[$index].type": "jsonArray"
|
|
}
|
|
},
|
|
{
|
|
"label": "Description",
|
|
"name": "description",
|
|
"type": "string",
|
|
"placeholder": "Description of the key"
|
|
}
|
|
],
|
|
"id": "llmAgentflow_1-input-llmStructuredOutput-array",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Update Flow State",
|
|
"name": "llmUpdateState",
|
|
"description": "Update runtime state during the execution of the workflow",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Key",
|
|
"name": "key",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listRuntimeStateKeys",
|
|
"freeSolo": true
|
|
},
|
|
{
|
|
"label": "Value",
|
|
"name": "value",
|
|
"type": "string",
|
|
"acceptVariable": true,
|
|
"acceptNodeOutputAsVariable": true
|
|
}
|
|
],
|
|
"id": "llmAgentflow_1-input-llmUpdateState-array",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"llmModel": "chatGoogleGenerativeAI",
|
|
"llmMessages": [],
|
|
"llmEnableMemory": true,
|
|
"llmMemoryType": "allMessages",
|
|
"llmUserMessage": "<p><span class=\"variable\" data-type=\"mention\" data-id=\"customFunctionAgentflow_1\" data-label=\"customFunctionAgentflow_1\">{{ customFunctionAgentflow_1 }}</span> </p>",
|
|
"llmReturnResponseAs": "userMessage",
|
|
"llmStructuredOutput": "",
|
|
"llmUpdateState": "",
|
|
"llmModelConfig": {
|
|
"credential": "",
|
|
"modelName": "gemini-2.0-flash",
|
|
"customModelName": "",
|
|
"temperature": 0.9,
|
|
"streaming": true,
|
|
"maxOutputTokens": "",
|
|
"topP": "",
|
|
"topK": "",
|
|
"harmCategory": "",
|
|
"harmBlockThreshold": "",
|
|
"baseUrl": "",
|
|
"allowImageUploads": "",
|
|
"llmModel": "chatGoogleGenerativeAI"
|
|
}
|
|
},
|
|
"outputAnchors": [
|
|
{
|
|
"id": "llmAgentflow_1-output-llmAgentflow",
|
|
"label": "LLM",
|
|
"name": "llmAgentflow"
|
|
}
|
|
],
|
|
"outputs": {},
|
|
"selected": false
|
|
},
|
|
"type": "agentFlow",
|
|
"width": 199,
|
|
"height": 72,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 1238.7660285501179,
|
|
"y": 20.56658816269558
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "conditionAgentAgentflow_1",
|
|
"position": {
|
|
"x": 966.5436041632489,
|
|
"y": 57.77868724229256
|
|
},
|
|
"data": {
|
|
"id": "conditionAgentAgentflow_1",
|
|
"label": "Check Result",
|
|
"version": 1,
|
|
"name": "conditionAgentAgentflow",
|
|
"type": "ConditionAgent",
|
|
"color": "#ff8fab",
|
|
"baseClasses": ["ConditionAgent"],
|
|
"category": "Agent Flows",
|
|
"description": "Utilize an agent to split flows based on dynamic conditions",
|
|
"inputParams": [
|
|
{
|
|
"label": "Model",
|
|
"name": "conditionAgentModel",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listModels",
|
|
"loadConfig": true,
|
|
"id": "conditionAgentAgentflow_1-input-conditionAgentModel-asyncOptions",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Instructions",
|
|
"name": "conditionAgentInstructions",
|
|
"type": "string",
|
|
"description": "A general instructions of what the condition agent should do",
|
|
"rows": 4,
|
|
"acceptVariable": true,
|
|
"placeholder": "Determine if the user is interested in learning about AI",
|
|
"id": "conditionAgentAgentflow_1-input-conditionAgentInstructions-string",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Input",
|
|
"name": "conditionAgentInput",
|
|
"type": "string",
|
|
"description": "Input to be used for the condition agent",
|
|
"rows": 4,
|
|
"acceptVariable": true,
|
|
"default": "<p><span class=\"variable\" data-type=\"mention\" data-id=\"question\" data-label=\"question\">{{ question }}</span> </p>",
|
|
"id": "conditionAgentAgentflow_1-input-conditionAgentInput-string",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Scenarios",
|
|
"name": "conditionAgentScenarios",
|
|
"description": "Define the scenarios that will be used as the conditions to split the flow",
|
|
"type": "array",
|
|
"array": [
|
|
{
|
|
"label": "Scenario",
|
|
"name": "scenario",
|
|
"type": "string",
|
|
"placeholder": "User is asking for a pizza"
|
|
}
|
|
],
|
|
"default": [
|
|
{
|
|
"scenario": "Result is correct and does not contains error"
|
|
},
|
|
{
|
|
"scenario": "Result query contains error"
|
|
}
|
|
],
|
|
"id": "conditionAgentAgentflow_1-input-conditionAgentScenarios-array",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"conditionAgentModel": "chatMistralAI",
|
|
"conditionAgentInstructions": "<p>You are a SQL expert. Check if the query result is correct or contains error.</p>",
|
|
"conditionAgentInput": "<p><span class=\"variable\" data-type=\"mention\" data-id=\"customFunctionAgentflow_1\" data-label=\"customFunctionAgentflow_1\">{{ customFunctionAgentflow_1 }}</span> </p>",
|
|
"conditionAgentScenarios": [
|
|
{
|
|
"scenario": "Result is correct and does not contains error"
|
|
},
|
|
{
|
|
"scenario": "Result query contains error"
|
|
}
|
|
],
|
|
"conditionAgentModelConfig": {
|
|
"credential": "",
|
|
"modelName": "mistral-medium-latest",
|
|
"temperature": 0.9,
|
|
"streaming": true,
|
|
"maxOutputTokens": "",
|
|
"topP": "",
|
|
"randomSeed": "",
|
|
"safeMode": "",
|
|
"overrideEndpoint": "",
|
|
"conditionAgentModel": "chatMistralAI"
|
|
}
|
|
},
|
|
"outputAnchors": [
|
|
{
|
|
"id": "conditionAgentAgentflow_1-output-0",
|
|
"label": "Condition Agent",
|
|
"name": "conditionAgentAgentflow"
|
|
},
|
|
{
|
|
"id": "conditionAgentAgentflow_1-output-1",
|
|
"label": "Condition Agent",
|
|
"name": "conditionAgentAgentflow"
|
|
}
|
|
],
|
|
"outputs": {
|
|
"conditionAgentAgentflow": ""
|
|
},
|
|
"selected": false
|
|
},
|
|
"type": "agentFlow",
|
|
"width": 228,
|
|
"height": 80,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 966.5436041632489,
|
|
"y": 57.77868724229256
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "loopAgentflow_1",
|
|
"position": {
|
|
"x": 1501.0055934843515,
|
|
"y": 140.83809747682727
|
|
},
|
|
"data": {
|
|
"id": "loopAgentflow_1",
|
|
"label": "Recheck SQL Query",
|
|
"version": 1,
|
|
"name": "loopAgentflow",
|
|
"type": "Loop",
|
|
"color": "#FFA07A",
|
|
"hideOutput": true,
|
|
"baseClasses": ["Loop"],
|
|
"category": "Agent Flows",
|
|
"description": "Loop back to a previous node",
|
|
"inputParams": [
|
|
{
|
|
"label": "Loop Back To",
|
|
"name": "loopBackToNode",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listPreviousNodes",
|
|
"freeSolo": true,
|
|
"id": "loopAgentflow_1-input-loopBackToNode-asyncOptions",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Max Loop Count",
|
|
"name": "maxLoopCount",
|
|
"type": "number",
|
|
"default": 5,
|
|
"id": "loopAgentflow_1-input-maxLoopCount-number",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"loopBackToNode": "conditionAgentAgentflow_0-Check SQL Query",
|
|
"maxLoopCount": 5,
|
|
"undefined": ""
|
|
},
|
|
"outputAnchors": [],
|
|
"outputs": {},
|
|
"selected": false
|
|
},
|
|
"type": "agentFlow",
|
|
"width": 202,
|
|
"height": 66,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 1501.0055934843515,
|
|
"y": 140.83809747682727
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "llmAgentflow_2",
|
|
"position": {
|
|
"x": 1235.4868883628933,
|
|
"y": 137.82100195002667
|
|
},
|
|
"data": {
|
|
"id": "llmAgentflow_2",
|
|
"label": "Regenerate SQL Query",
|
|
"version": 1,
|
|
"name": "llmAgentflow",
|
|
"type": "LLM",
|
|
"color": "#64B5F6",
|
|
"baseClasses": ["LLM"],
|
|
"category": "Agent Flows",
|
|
"description": "Large language models to analyze user-provided inputs and generate responses",
|
|
"inputParams": [
|
|
{
|
|
"label": "Model",
|
|
"name": "llmModel",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listModels",
|
|
"loadConfig": true,
|
|
"id": "llmAgentflow_2-input-llmModel-asyncOptions",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Messages",
|
|
"name": "llmMessages",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Role",
|
|
"name": "role",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "System",
|
|
"name": "system"
|
|
},
|
|
{
|
|
"label": "Assistant",
|
|
"name": "assistant"
|
|
},
|
|
{
|
|
"label": "Developer",
|
|
"name": "developer"
|
|
},
|
|
{
|
|
"label": "User",
|
|
"name": "user"
|
|
}
|
|
]
|
|
},
|
|
{
|
|
"label": "Content",
|
|
"name": "content",
|
|
"type": "string",
|
|
"acceptVariable": true,
|
|
"generateInstruction": true,
|
|
"rows": 4
|
|
}
|
|
],
|
|
"id": "llmAgentflow_2-input-llmMessages-array",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Enable Memory",
|
|
"name": "llmEnableMemory",
|
|
"type": "boolean",
|
|
"description": "Enable memory for the conversation thread",
|
|
"default": true,
|
|
"optional": true,
|
|
"id": "llmAgentflow_2-input-llmEnableMemory-boolean",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Memory Type",
|
|
"name": "llmMemoryType",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "All Messages",
|
|
"name": "allMessages",
|
|
"description": "Retrieve all messages from the conversation"
|
|
},
|
|
{
|
|
"label": "Window Size",
|
|
"name": "windowSize",
|
|
"description": "Uses a fixed window size to surface the last N messages"
|
|
},
|
|
{
|
|
"label": "Conversation Summary",
|
|
"name": "conversationSummary",
|
|
"description": "Summarizes the whole conversation"
|
|
},
|
|
{
|
|
"label": "Conversation Summary Buffer",
|
|
"name": "conversationSummaryBuffer",
|
|
"description": "Summarize conversations once token limit is reached. Default to 2000"
|
|
}
|
|
],
|
|
"optional": true,
|
|
"default": "allMessages",
|
|
"show": {
|
|
"llmEnableMemory": true
|
|
},
|
|
"id": "llmAgentflow_2-input-llmMemoryType-options",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Window Size",
|
|
"name": "llmMemoryWindowSize",
|
|
"type": "number",
|
|
"default": "20",
|
|
"description": "Uses a fixed window size to surface the last N messages",
|
|
"show": {
|
|
"llmMemoryType": "windowSize"
|
|
},
|
|
"id": "llmAgentflow_2-input-llmMemoryWindowSize-number",
|
|
"display": false
|
|
},
|
|
{
|
|
"label": "Max Token Limit",
|
|
"name": "llmMemoryMaxTokenLimit",
|
|
"type": "number",
|
|
"default": "2000",
|
|
"description": "Summarize conversations once token limit is reached. Default to 2000",
|
|
"show": {
|
|
"llmMemoryType": "conversationSummaryBuffer"
|
|
},
|
|
"id": "llmAgentflow_2-input-llmMemoryMaxTokenLimit-number",
|
|
"display": false
|
|
},
|
|
{
|
|
"label": "Input Message",
|
|
"name": "llmUserMessage",
|
|
"type": "string",
|
|
"description": "Add an input message as user message at the end of the conversation",
|
|
"rows": 4,
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"show": {
|
|
"llmEnableMemory": true
|
|
},
|
|
"id": "llmAgentflow_2-input-llmUserMessage-string",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Return Response As",
|
|
"name": "llmReturnResponseAs",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "User Message",
|
|
"name": "userMessage"
|
|
},
|
|
{
|
|
"label": "Assistant Message",
|
|
"name": "assistantMessage"
|
|
}
|
|
],
|
|
"default": "userMessage",
|
|
"id": "llmAgentflow_2-input-llmReturnResponseAs-options",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "JSON Structured Output",
|
|
"name": "llmStructuredOutput",
|
|
"description": "Instruct the LLM to give output in a JSON structured schema",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Key",
|
|
"name": "key",
|
|
"type": "string"
|
|
},
|
|
{
|
|
"label": "Type",
|
|
"name": "type",
|
|
"type": "options",
|
|
"options": [
|
|
{
|
|
"label": "String",
|
|
"name": "string"
|
|
},
|
|
{
|
|
"label": "String Array",
|
|
"name": "stringArray"
|
|
},
|
|
{
|
|
"label": "Number",
|
|
"name": "number"
|
|
},
|
|
{
|
|
"label": "Boolean",
|
|
"name": "boolean"
|
|
},
|
|
{
|
|
"label": "Enum",
|
|
"name": "enum"
|
|
},
|
|
{
|
|
"label": "JSON Array",
|
|
"name": "jsonArray"
|
|
}
|
|
]
|
|
},
|
|
{
|
|
"label": "Enum Values",
|
|
"name": "enumValues",
|
|
"type": "string",
|
|
"placeholder": "value1, value2, value3",
|
|
"description": "Enum values. Separated by comma",
|
|
"optional": true,
|
|
"show": {
|
|
"llmStructuredOutput[$index].type": "enum"
|
|
}
|
|
},
|
|
{
|
|
"label": "JSON Schema",
|
|
"name": "jsonSchema",
|
|
"type": "code",
|
|
"placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}",
|
|
"description": "JSON schema for the structured output",
|
|
"optional": true,
|
|
"show": {
|
|
"llmStructuredOutput[$index].type": "jsonArray"
|
|
}
|
|
},
|
|
{
|
|
"label": "Description",
|
|
"name": "description",
|
|
"type": "string",
|
|
"placeholder": "Description of the key"
|
|
}
|
|
],
|
|
"id": "llmAgentflow_2-input-llmStructuredOutput-array",
|
|
"display": true
|
|
},
|
|
{
|
|
"label": "Update Flow State",
|
|
"name": "llmUpdateState",
|
|
"description": "Update runtime state during the execution of the workflow",
|
|
"type": "array",
|
|
"optional": true,
|
|
"acceptVariable": true,
|
|
"array": [
|
|
{
|
|
"label": "Key",
|
|
"name": "key",
|
|
"type": "asyncOptions",
|
|
"loadMethod": "listRuntimeStateKeys",
|
|
"freeSolo": true
|
|
},
|
|
{
|
|
"label": "Value",
|
|
"name": "value",
|
|
"type": "string",
|
|
"acceptVariable": true,
|
|
"acceptNodeOutputAsVariable": true
|
|
}
|
|
],
|
|
"id": "llmAgentflow_2-input-llmUpdateState-array",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"llmModel": "chatAnthropic",
|
|
"llmMessages": [
|
|
{
|
|
"role": "system",
|
|
"content": "<p>You are an agent designed to interact with a SQL database. Given an input question, create a syntactically correct sqlite query to run, then look at the results of the query and return the answer. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most 5 results. You can order the results by a relevant column to return the most interesting examples in the database. Never query for all the columns from a specific table, only ask for the relevant columns given the question. DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.</p><p>Here is the relevant table info:</p><p><span class=\"variable\" data-type=\"mention\" data-id=\"customFunctionAgentflow_0\" data-label=\"customFunctionAgentflow_0\">{{ customFunctionAgentflow_0 }}</span> </p><p></p>"
|
|
}
|
|
],
|
|
"llmEnableMemory": true,
|
|
"llmMemoryType": "allMessages",
|
|
"llmUserMessage": "<p>Given the generated SQL Query: <span class=\"variable\" data-type=\"mention\" data-id=\"$flow.state.sqlQuery\" data-label=\"$flow.state.sqlQuery\">{{ $flow.state.sqlQuery }}</span> </p><p>I have the following error: <span class=\"variable\" data-type=\"mention\" data-id=\"customFunctionAgentflow_1\" data-label=\"customFunctionAgentflow_1\">{{ customFunctionAgentflow_1 }}</span> </p><p>Regenerate a new SQL Query that will fix the error</p>",
|
|
"llmReturnResponseAs": "userMessage",
|
|
"llmStructuredOutput": [
|
|
{
|
|
"key": "sql_query",
|
|
"type": "string",
|
|
"enumValues": "",
|
|
"jsonSchema": "",
|
|
"description": "SQL query"
|
|
}
|
|
],
|
|
"llmUpdateState": [
|
|
{
|
|
"key": "sqlQuery",
|
|
"value": "<p><span class=\"variable\" data-type=\"mention\" data-id=\"output.sql_query\" data-label=\"output.sql_query\">{{ output.sql_query }}</span> </p>"
|
|
}
|
|
],
|
|
"llmModelConfig": {
|
|
"credential": "",
|
|
"modelName": "claude-sonnet-4-0",
|
|
"temperature": 0.9,
|
|
"streaming": true,
|
|
"maxTokensToSample": "",
|
|
"topP": "",
|
|
"topK": "",
|
|
"extendedThinking": "",
|
|
"budgetTokens": 1024,
|
|
"allowImageUploads": "",
|
|
"llmModel": "chatAnthropic"
|
|
}
|
|
},
|
|
"outputAnchors": [
|
|
{
|
|
"id": "llmAgentflow_2-output-llmAgentflow",
|
|
"label": "LLM",
|
|
"name": "llmAgentflow"
|
|
}
|
|
],
|
|
"outputs": {},
|
|
"selected": false
|
|
},
|
|
"type": "agentFlow",
|
|
"width": 220,
|
|
"height": 72,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 1235.4868883628933,
|
|
"y": 137.82100195002667
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "stickyNoteAgentflow_0",
|
|
"position": {
|
|
"x": 973.4435331695138,
|
|
"y": 156.551869199512
|
|
},
|
|
"data": {
|
|
"id": "stickyNoteAgentflow_0",
|
|
"label": "Sticky Note",
|
|
"version": 1,
|
|
"name": "stickyNoteAgentflow",
|
|
"type": "StickyNote",
|
|
"color": "#fee440",
|
|
"baseClasses": ["StickyNote"],
|
|
"category": "Agent Flows",
|
|
"description": "Add notes to the agent flow",
|
|
"inputParams": [
|
|
{
|
|
"label": "",
|
|
"name": "note",
|
|
"type": "string",
|
|
"rows": 1,
|
|
"placeholder": "Type something here",
|
|
"optional": true,
|
|
"id": "stickyNoteAgentflow_0-input-note-string",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"note": "This is an auto correct mechanism that regenerate sql query if result contains error"
|
|
},
|
|
"outputAnchors": [
|
|
{
|
|
"id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow",
|
|
"label": "Sticky Note",
|
|
"name": "stickyNoteAgentflow"
|
|
}
|
|
],
|
|
"outputs": {},
|
|
"selected": false
|
|
},
|
|
"type": "stickyNote",
|
|
"width": 210,
|
|
"height": 123,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 973.4435331695138,
|
|
"y": 156.551869199512
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "stickyNoteAgentflow_1",
|
|
"position": {
|
|
"x": 514.8377809033279,
|
|
"y": 200.97994630025966
|
|
},
|
|
"data": {
|
|
"id": "stickyNoteAgentflow_1",
|
|
"label": "Sticky Note (1)",
|
|
"version": 1,
|
|
"name": "stickyNoteAgentflow",
|
|
"type": "StickyNote",
|
|
"color": "#fee440",
|
|
"baseClasses": ["StickyNote"],
|
|
"category": "Agent Flows",
|
|
"description": "Add notes to the agent flow",
|
|
"inputParams": [
|
|
{
|
|
"label": "",
|
|
"name": "note",
|
|
"type": "string",
|
|
"rows": 1,
|
|
"placeholder": "Type something here",
|
|
"optional": true,
|
|
"id": "stickyNoteAgentflow_1-input-note-string",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"note": "Check if generated SQL query contains errors/mistakes, if yes - regenerate"
|
|
},
|
|
"outputAnchors": [
|
|
{
|
|
"id": "stickyNoteAgentflow_1-output-stickyNoteAgentflow",
|
|
"label": "Sticky Note",
|
|
"name": "stickyNoteAgentflow"
|
|
}
|
|
],
|
|
"outputs": {},
|
|
"selected": false
|
|
},
|
|
"type": "stickyNote",
|
|
"width": 210,
|
|
"height": 123,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 514.8377809033279,
|
|
"y": 200.97994630025966
|
|
},
|
|
"dragging": false
|
|
},
|
|
{
|
|
"id": "stickyNoteAgentflow_2",
|
|
"position": {
|
|
"x": 40.21835449345774,
|
|
"y": 6.978337213146034
|
|
},
|
|
"data": {
|
|
"id": "stickyNoteAgentflow_2",
|
|
"label": "Sticky Note (1) (2)",
|
|
"version": 1,
|
|
"name": "stickyNoteAgentflow",
|
|
"type": "StickyNote",
|
|
"color": "#fee440",
|
|
"baseClasses": ["StickyNote"],
|
|
"category": "Agent Flows",
|
|
"description": "Add notes to the agent flow",
|
|
"inputParams": [
|
|
{
|
|
"label": "",
|
|
"name": "note",
|
|
"type": "string",
|
|
"rows": 1,
|
|
"placeholder": "Type something here",
|
|
"optional": true,
|
|
"id": "stickyNoteAgentflow_2-input-note-string",
|
|
"display": true
|
|
}
|
|
],
|
|
"inputAnchors": [],
|
|
"inputs": {
|
|
"note": "Retrieve database schema"
|
|
},
|
|
"outputAnchors": [
|
|
{
|
|
"id": "stickyNoteAgentflow_2-output-stickyNoteAgentflow",
|
|
"label": "Sticky Note",
|
|
"name": "stickyNoteAgentflow"
|
|
}
|
|
],
|
|
"outputs": {},
|
|
"selected": false
|
|
},
|
|
"type": "stickyNote",
|
|
"width": 210,
|
|
"height": 82,
|
|
"selected": false,
|
|
"positionAbsolute": {
|
|
"x": 40.21835449345774,
|
|
"y": 6.978337213146034
|
|
},
|
|
"dragging": false
|
|
}
|
|
],
|
|
"edges": [
|
|
{
|
|
"source": "startAgentflow_0",
|
|
"sourceHandle": "startAgentflow_0-output-startAgentflow",
|
|
"target": "customFunctionAgentflow_0",
|
|
"targetHandle": "customFunctionAgentflow_0",
|
|
"data": {
|
|
"sourceColor": "#7EE787",
|
|
"targetColor": "#E4B7FF",
|
|
"isHumanInput": false
|
|
},
|
|
"type": "agentFlow",
|
|
"id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-customFunctionAgentflow_0-customFunctionAgentflow_0"
|
|
},
|
|
{
|
|
"source": "customFunctionAgentflow_0",
|
|
"sourceHandle": "customFunctionAgentflow_0-output-customFunctionAgentflow",
|
|
"target": "llmAgentflow_0",
|
|
"targetHandle": "llmAgentflow_0",
|
|
"data": {
|
|
"sourceColor": "#E4B7FF",
|
|
"targetColor": "#64B5F6",
|
|
"isHumanInput": false
|
|
},
|
|
"type": "agentFlow",
|
|
"id": "customFunctionAgentflow_0-customFunctionAgentflow_0-output-customFunctionAgentflow-llmAgentflow_0-llmAgentflow_0"
|
|
},
|
|
{
|
|
"source": "llmAgentflow_0",
|
|
"sourceHandle": "llmAgentflow_0-output-llmAgentflow",
|
|
"target": "conditionAgentAgentflow_0",
|
|
"targetHandle": "conditionAgentAgentflow_0",
|
|
"data": {
|
|
"sourceColor": "#64B5F6",
|
|
"targetColor": "#ff8fab",
|
|
"isHumanInput": false
|
|
},
|
|
"type": "agentFlow",
|
|
"id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-conditionAgentAgentflow_0-conditionAgentAgentflow_0"
|
|
},
|
|
{
|
|
"source": "conditionAgentAgentflow_0",
|
|
"sourceHandle": "conditionAgentAgentflow_0-output-0",
|
|
"target": "customFunctionAgentflow_1",
|
|
"targetHandle": "customFunctionAgentflow_1",
|
|
"data": {
|
|
"sourceColor": "#ff8fab",
|
|
"targetColor": "#E4B7FF",
|
|
"edgeLabel": "0",
|
|
"isHumanInput": false
|
|
},
|
|
"type": "agentFlow",
|
|
"id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-0-customFunctionAgentflow_1-customFunctionAgentflow_1"
|
|
},
|
|
{
|
|
"source": "conditionAgentAgentflow_0",
|
|
"sourceHandle": "conditionAgentAgentflow_0-output-1",
|
|
"target": "loopAgentflow_0",
|
|
"targetHandle": "loopAgentflow_0",
|
|
"data": {
|
|
"sourceColor": "#ff8fab",
|
|
"targetColor": "#FFA07A",
|
|
"edgeLabel": "1",
|
|
"isHumanInput": false
|
|
},
|
|
"type": "agentFlow",
|
|
"id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-1-loopAgentflow_0-loopAgentflow_0"
|
|
},
|
|
{
|
|
"source": "customFunctionAgentflow_1",
|
|
"sourceHandle": "customFunctionAgentflow_1-output-customFunctionAgentflow",
|
|
"target": "conditionAgentAgentflow_1",
|
|
"targetHandle": "conditionAgentAgentflow_1",
|
|
"data": {
|
|
"sourceColor": "#E4B7FF",
|
|
"targetColor": "#ff8fab",
|
|
"isHumanInput": false
|
|
},
|
|
"type": "agentFlow",
|
|
"id": "customFunctionAgentflow_1-customFunctionAgentflow_1-output-customFunctionAgentflow-conditionAgentAgentflow_1-conditionAgentAgentflow_1"
|
|
},
|
|
{
|
|
"source": "conditionAgentAgentflow_1",
|
|
"sourceHandle": "conditionAgentAgentflow_1-output-0",
|
|
"target": "llmAgentflow_1",
|
|
"targetHandle": "llmAgentflow_1",
|
|
"data": {
|
|
"sourceColor": "#ff8fab",
|
|
"targetColor": "#64B5F6",
|
|
"edgeLabel": "0",
|
|
"isHumanInput": false
|
|
},
|
|
"type": "agentFlow",
|
|
"id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-0-llmAgentflow_1-llmAgentflow_1"
|
|
},
|
|
{
|
|
"source": "conditionAgentAgentflow_1",
|
|
"sourceHandle": "conditionAgentAgentflow_1-output-1",
|
|
"target": "llmAgentflow_2",
|
|
"targetHandle": "llmAgentflow_2",
|
|
"data": {
|
|
"sourceColor": "#ff8fab",
|
|
"targetColor": "#64B5F6",
|
|
"edgeLabel": "1",
|
|
"isHumanInput": false
|
|
},
|
|
"type": "agentFlow",
|
|
"id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-1-llmAgentflow_2-llmAgentflow_2"
|
|
},
|
|
{
|
|
"source": "llmAgentflow_2",
|
|
"sourceHandle": "llmAgentflow_2-output-llmAgentflow",
|
|
"target": "loopAgentflow_1",
|
|
"targetHandle": "loopAgentflow_1",
|
|
"data": {
|
|
"sourceColor": "#64B5F6",
|
|
"targetColor": "#FFA07A",
|
|
"isHumanInput": false
|
|
},
|
|
"type": "agentFlow",
|
|
"id": "llmAgentflow_2-llmAgentflow_2-output-llmAgentflow-loopAgentflow_1-loopAgentflow_1"
|
|
}
|
|
]
|
|
}
|