diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a18002288..60735ef18 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -120,45 +120,48 @@ Flowise has 3 different modules in a single mono repository.
Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://docs.flowiseai.com/environment-variables)
-| Variable | Description | Type | Default |
-| ---------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
-| PORT | The HTTP port Flowise runs on | Number | 3000 |
-| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
-| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
-| FLOWISE_USERNAME | Username to login | String | |
-| FLOWISE_PASSWORD | Password to login | String | |
-| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
-| DEBUG | Print logs from components | Boolean | |
-| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
-| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
-| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
-| APIKEY_STORAGE_TYPE | To store api keys on a JSON file or database. Default is `json` | Enum String: `json`, `db` | `json` |
-| APIKEY_PATH | Location where api keys are saved when `APIKEY_STORAGE_TYPE` is `json` | String | `your-path/Flowise/packages/server` |
-| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
-| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
-| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
-| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
-| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
-| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
-| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
-| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
-| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
-| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
-| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false |
-| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
-| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | |
-| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean | |
-| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
-| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local` | `local` |
-| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
-| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
-| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
-| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
-| S3_STORAGE_REGION | Region for S3 bucket | String | |
-| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | |
-| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false |
-| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | |
-| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | |
+| Variable | Description | Type | Default |
+| ---------------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
+| PORT | The HTTP port Flowise runs on | Number | 3000 |
+| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
+| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
+| FLOWISE_USERNAME | Username to login | String | |
+| FLOWISE_PASSWORD | Password to login | String | |
+| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
+| DEBUG | Print logs from components | Boolean | |
+| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
+| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
+| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
+| APIKEY_STORAGE_TYPE | To store api keys on a JSON file or database. Default is `json` | Enum String: `json`, `db` | `json` |
+| APIKEY_PATH | Location where api keys are saved when `APIKEY_STORAGE_TYPE` is `json` | String | `your-path/Flowise/packages/server` |
+| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
+| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
+| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
+| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
+| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
+| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
+| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
+| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
+| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
+| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
+| DATABASE_SSL                       | Database connection over SSL (When DATABASE_TYPE is postgres)                    | Boolean                                          | false                               |
+| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
+| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | |
+| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
+| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local`, `gcs` | `local` |
+| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
+| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
+| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
+| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
+| S3_STORAGE_REGION | Region for S3 bucket | String | |
+| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | |
+| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false |
+| GOOGLE_CLOUD_STORAGE_PROJ_ID | The GCP project id for cloud storage & logging when `STORAGE_TYPE` is `gcs` | String | |
+| GOOGLE_CLOUD_STORAGE_CREDENTIAL | The credential key file path when `STORAGE_TYPE` is `gcs` | String | |
+| GOOGLE_CLOUD_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `gcs` | String | |
+| GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS | Enable uniform bucket level access when `STORAGE_TYPE` is `gcs` | Boolean | true |
+| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | |
+| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | |
You can also specify the env variables when using `npx`. For example:
diff --git a/Dockerfile b/Dockerfile
index dfbf58d1b..a824b7f80 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,6 +12,10 @@ RUN apk add --no-cache build-base cairo-dev pango-dev
# Install Chromium
RUN apk add --no-cache chromium
+# Install curl for container-level health checks
+# Fixes: https://github.com/FlowiseAI/Flowise/issues/4126
+RUN apk add --no-cache curl
+
#install PNPM globaly
RUN npm install -g pnpm
diff --git a/README.md b/README.md
index 543054da2..d1c9b2da4 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,9 @@
-
-
-# Flowise - Build LLM Apps Easily
+
+
+
+
+
## ⚡Quick Start
@@ -182,9 +183,9 @@ Deploy Flowise self-hosted in your existing infrastructure, we support various [
[](https://elest.io/open-source/flowiseai)
- - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+ - [Sealos](https://template.sealos.io/deploy?templateName=flowise)
- [](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+ [](https://template.sealos.io/deploy?templateName=flowise)
- [RepoCloud](https://repocloud.io/details/?app_id=29)
diff --git a/docker/.env.example b/docker/.env.example
index bff5ef8f9..56ac56a80 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -42,13 +42,11 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# LANGCHAIN_API_KEY=your_api_key
# LANGCHAIN_PROJECT=your_project
-# DISABLE_FLOWISE_TELEMETRY=true
-
# Uncomment the following line to enable model list config, load the list of models from your local config file
# see https://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json for the format
# MODEL_LIST_CONFIG_JSON=/your_model_list_config_file_path
-# STORAGE_TYPE=local (local | s3)
+# STORAGE_TYPE=local (local | s3 | gcs)
# BLOB_STORAGE_PATH=/your_storage_path/.flowise/storage
# S3_STORAGE_BUCKET_NAME=flowise
# S3_STORAGE_ACCESS_KEY_ID=
-
-# Flowise - LLM アプリを簡単に構築
+
+
+
+
+
## ⚡ クイックスタート
diff --git a/i18n/README-KR.md b/i18n/README-KR.md
index b34e22628..c02b0b066 100644
--- a/i18n/README-KR.md
+++ b/i18n/README-KR.md
@@ -1,8 +1,9 @@
-
-
-# Flowise - 간편한 LLM 애플리케이션 제작
+
+
+
+
+
## ⚡빠른 시작 가이드
diff --git a/i18n/README-TW.md b/i18n/README-TW.md
new file mode 100644
index 000000000..f051e844e
--- /dev/null
+++ b/i18n/README-TW.md
@@ -0,0 +1,217 @@
+
+
+
+
+
+
+
+## ⚡ 快速開始
+
+下載並安裝 [NodeJS](https://nodejs.org/en/download) >= 18.15.0
+
+1. 安裝 Flowise
+ ```bash
+ npm install -g flowise
+ ```
+2. 啟動 Flowise
+
+ ```bash
+ npx flowise start
+ ```
+
+ 使用用戶名和密碼
+
+ ```bash
+ npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
+ ```
+
+3. 打開 [http://localhost:3000](http://localhost:3000)
+
+## 🐳 Docker
+
+### Docker Compose
+
+1. 克隆 Flowise 項目
+2. 進入項目根目錄的 `docker` 文件夾
+3. 複製 `.env.example` 文件,粘貼到相同位置,並重命名為 `.env` 文件
+4. `docker compose up -d`
+5. 打開 [http://localhost:3000](http://localhost:3000)
+6. 您可以通過 `docker compose stop` 停止容器
+
+### Docker 映像
+
+1. 本地構建映像:
+ ```bash
+ docker build --no-cache -t flowise .
+ ```
+2. 運行映像:
+
+ ```bash
+ docker run -d --name flowise -p 3000:3000 flowise
+ ```
+
+3. 停止映像:
+ ```bash
+ docker stop flowise
+ ```
+
+## 👨💻 開發者
+
+Flowise 在單個 mono 存儲庫中有 3 個不同的模塊。
+
+- `server`: 提供 API 邏輯的 Node 後端
+- `ui`: React 前端
+- `components`: 第三方節點集成
+- `api-documentation`: 從 express 自動生成的 swagger-ui API 文檔
+
+### 先決條件
+
+- 安裝 [PNPM](https://pnpm.io/installation)
+ ```bash
+ npm i -g pnpm
+ ```
+
+### 設置
+
+1. 克隆存儲庫
+
+ ```bash
+ git clone https://github.com/FlowiseAI/Flowise.git
+ ```
+
+2. 進入存儲庫文件夾
+
+ ```bash
+ cd Flowise
+ ```
+
+3. 安裝所有模塊的所有依賴項:
+
+ ```bash
+ pnpm install
+ ```
+
+4. 構建所有代碼:
+
+ ```bash
+ pnpm build
+ ```
+
+
-
-# Flowise - 轻松构建 LLM 应用程序
+
+
+
+
+
## ⚡ 快速入门
@@ -170,9 +171,9 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package
[](https://elest.io/open-source/flowiseai)
- - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+ - [Sealos](https://template.sealos.io/deploy?templateName=flowise)
- [](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+ [](https://template.sealos.io/deploy?templateName=flowise)
- [RepoCloud](https://repocloud.io/details/?app_id=29)
diff --git a/images/flowise_agentflow.gif b/images/flowise_agentflow.gif
new file mode 100644
index 000000000..0e51d24f4
Binary files /dev/null and b/images/flowise_agentflow.gif differ
diff --git a/images/flowise_dark.svg b/images/flowise_dark.svg
new file mode 100644
index 000000000..f5c0725fa
--- /dev/null
+++ b/images/flowise_dark.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/flowise_white.svg b/images/flowise_white.svg
new file mode 100644
index 000000000..2a93a7449
--- /dev/null
+++ b/images/flowise_white.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/package.json b/package.json
index dbc40a39b..f7855fef5 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "flowise",
- "version": "2.2.5",
+ "version": "3.0.0",
"private": true,
"homepage": "https://flowiseai.com",
"workspaces": [
@@ -13,7 +13,7 @@
"scripts": {
"build": "turbo run build",
"build-force": "pnpm clean && turbo run build --force",
- "dev": "turbo run dev --parallel",
+ "dev": "turbo run dev --parallel --no-cache",
"start": "run-script-os",
"start:windows": "cd packages/server/bin && run start",
"start:default": "cd packages/server/bin && ./run start",
@@ -62,7 +62,20 @@
"sqlite3"
],
"overrides": {
- "set-value": "^3.0.3"
+ "axios": "1.7.9",
+ "body-parser": "2.0.2",
+ "braces": "3.0.3",
+ "cross-spawn": "7.0.6",
+ "glob-parent": "6.0.2",
+ "http-proxy-middleware": "3.0.3",
+ "json5": "2.2.3",
+ "nth-check": "2.1.1",
+ "path-to-regexp": "0.1.12",
+ "prismjs": "1.29.0",
+ "semver": "7.7.1",
+ "set-value": "4.1.0",
+ "unset-value": "2.0.1",
+ "webpack-dev-middleware": "7.4.2"
}
},
"engines": {
@@ -70,11 +83,11 @@
"pnpm": ">=9"
},
"resolutions": {
- "@google/generative-ai": "^0.15.0",
+ "@google/generative-ai": "^0.24.0",
"@grpc/grpc-js": "^1.10.10",
"@langchain/core": "0.3.37",
"@qdrant/openapi-typescript-fetch": "1.2.6",
- "openai": "4.82.0",
+ "openai": "4.96.0",
"protobufjs": "7.4.0"
},
"eslintIgnore": [
diff --git a/packages/api-documentation/package.json b/packages/api-documentation/package.json
index 891cda8e0..780920f7c 100644
--- a/packages/api-documentation/package.json
+++ b/packages/api-documentation/package.json
@@ -5,7 +5,6 @@
"scripts": {
"build": "tsc",
"start": "node dist/index.js",
- "dev": "concurrently \"tsc-watch --noClear -p ./tsconfig.json\" \"nodemon\"",
"lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0"
},
"license": "SEE LICENSE IN LICENSE.md",
diff --git a/packages/components/README-ZH.md b/packages/components/README-ZH.md
index 52d43eb25..e672b6e7f 100644
--- a/packages/components/README-ZH.md
+++ b/packages/components/README-ZH.md
@@ -6,7 +6,7 @@
Flowise 的应用集成。包含节点和凭据。
-
+
安装:
diff --git a/packages/components/README.md b/packages/components/README.md
index f8e083746..867ad1ca1 100644
--- a/packages/components/README.md
+++ b/packages/components/README.md
@@ -6,7 +6,7 @@ English | [中文](./README-ZH.md)
Apps integration for Flowise. Contain Nodes and Credentials.
-
+
Install:
diff --git a/packages/components/credentials/HTTPApiKey.credential.ts b/packages/components/credentials/HTTPApiKey.credential.ts
new file mode 100644
index 000000000..92aaa6056
--- /dev/null
+++ b/packages/components/credentials/HTTPApiKey.credential.ts
@@ -0,0 +1,28 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class HTTPApiKeyCredential implements INodeCredential {
+ label: string
+ name: string
+ version: number
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'HTTP Api Key'
+ this.name = 'httpApiKey'
+ this.version = 1.0
+ this.inputs = [
+ {
+ label: 'Key',
+ name: 'key',
+ type: 'string'
+ },
+ {
+ label: 'Value',
+ name: 'value',
+ type: 'password'
+ }
+ ]
+ }
+}
+
+module.exports = { credClass: HTTPApiKeyCredential }
diff --git a/packages/components/credentials/HTTPBasicAuth.credential.ts b/packages/components/credentials/HTTPBasicAuth.credential.ts
new file mode 100644
index 000000000..43b712e6f
--- /dev/null
+++ b/packages/components/credentials/HTTPBasicAuth.credential.ts
@@ -0,0 +1,28 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class HttpBasicAuthCredential implements INodeCredential {
+ label: string
+ name: string
+ version: number
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'HTTP Basic Auth'
+ this.name = 'httpBasicAuth'
+ this.version = 1.0
+ this.inputs = [
+ {
+ label: 'Basic Auth Username',
+ name: 'basicAuthUsername',
+ type: 'string'
+ },
+ {
+ label: 'Basic Auth Password',
+ name: 'basicAuthPassword',
+ type: 'password'
+ }
+ ]
+ }
+}
+
+module.exports = { credClass: HttpBasicAuthCredential }
diff --git a/packages/components/credentials/HTTPBearerToken.credential.ts b/packages/components/credentials/HTTPBearerToken.credential.ts
new file mode 100644
index 000000000..f258aeb66
--- /dev/null
+++ b/packages/components/credentials/HTTPBearerToken.credential.ts
@@ -0,0 +1,23 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class HTTPBearerTokenCredential implements INodeCredential {
+ label: string
+ name: string
+ version: number
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'HTTP Bearer Token'
+ this.name = 'httpBearerToken'
+ this.version = 1.0
+ this.inputs = [
+ {
+ label: 'Token',
+ name: 'token',
+ type: 'password'
+ }
+ ]
+ }
+}
+
+module.exports = { credClass: HTTPBearerTokenCredential }
diff --git a/packages/components/credentials/JiraApi.credential.ts b/packages/components/credentials/JiraApi.credential.ts
new file mode 100644
index 000000000..6638f2e0b
--- /dev/null
+++ b/packages/components/credentials/JiraApi.credential.ts
@@ -0,0 +1,33 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class JiraApi implements INodeCredential {
+ label: string
+ name: string
+ version: number
+ description: string
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'Jira API'
+ this.name = 'jiraApi'
+ this.version = 1.0
+ this.description =
+            'Refer to official guide on how to get accessToken on Jira'
+ this.inputs = [
+ {
+ label: 'User Name',
+ name: 'username',
+ type: 'string',
+ placeholder: 'username@example.com'
+ },
+ {
+ label: 'Access Token',
+ name: 'accessToken',
+ type: 'password',
+ placeholder: '{{ question }}
' + }, + { + label: 'Scenarios', + name: 'conditionAgentScenarios', + description: 'Define the scenarios that will be used as the conditions to split the flow', + type: 'array', + array: [ + { + label: 'Scenario', + name: 'scenario', + type: 'string', + placeholder: 'User is asking for a pizza' + } + ], + default: [ + { + scenario: '' + }, + { + scenario: '' + } + ] + } + /*{ + label: 'Enable Memory', + name: 'conditionAgentEnableMemory', + type: 'boolean', + description: 'Enable memory for the conversation thread', + default: true, + optional: true + }, + { + label: 'Memory Type', + name: 'conditionAgentMemoryType', + type: 'options', + options: [ + { + label: 'All Messages', + name: 'allMessages', + description: 'Retrieve all messages from the conversation' + }, + { + label: 'Window Size', + name: 'windowSize', + description: 'Uses a fixed window size to surface the last N messages' + }, + { + label: 'Conversation Summary', + name: 'conversationSummary', + description: 'Summarizes the whole conversation' + }, + { + label: 'Conversation Summary Buffer', + name: 'conversationSummaryBuffer', + description: 'Summarize conversations once token limit is reached. Default to 2000' + } + ], + optional: true, + default: 'allMessages', + show: { + conditionAgentEnableMemory: true + } + }, + { + label: 'Window Size', + name: 'conditionAgentMemoryWindowSize', + type: 'number', + default: '20', + description: 'Uses a fixed window size to surface the last N messages', + show: { + conditionAgentMemoryType: 'windowSize' + } + }, + { + label: 'Max Token Limit', + name: 'conditionAgentMemoryMaxTokenLimit', + type: 'number', + default: '2000', + description: 'Summarize conversations once token limit is reached. 
Default to 2000', + show: { + conditionAgentMemoryType: 'conversationSummaryBuffer' + } + }*/ + ] + this.outputs = [ + { + label: '0', + name: '0', + description: 'Condition 0' + }, + { + label: '1', + name: '1', + description: 'Else' + } + ] + } + + //@ts-ignore + loadMethods = { + async listModels(_: INodeData, options: ICommonObject): PromiseSummarize the conversation between the user and the assistant, reiterate the last message from the assistant, and ask if user would like to proceed or if they have any feedback.
+{{ question }}
", + "id": "conditionAgentAgentflow_0-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "AI Related" + }, + { + "scenario": "General" + } + ], + "id": "conditionAgentAgentflow_0-input-conditionAgentScenarios-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatOpenAI", + "conditionAgentInstructions": "Check if user is asking about AI related topic, or just general query
", + "conditionAgentInput": "{{ question }}
", + "conditionAgentScenarios": [ + { + "scenario": "AI Related" + }, + { + "scenario": "General" + } + ], + "conditionAgentModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "conditionAgentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_0-output-0", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + }, + { + "id": "conditionAgentAgentflow_0-output-1", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 190, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": -114.84790789259606, + "y": 53.22583468442305 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": 158.29022963739308, + "y": -20.666608318859062 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Generate Query", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": 
"Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "Given the user question and history, construct a short string that can be used for searching vector database. Only generate the query, no meta comments, no explanation
Example:
Question: what are the events happening today?
Query: today's event
Example:
Question: how about the address?
Query: business address of the shop
Question: {{ question }}
Query:
" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": [ + { + "key": "query", + "value": "{{ output }}
" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 158.29022963739308, + "y": -20.666608318859062 + }, + "dragging": false + }, + { + "id": "llmAgentflow_1", + "position": { + "x": 165.82871786911647, + "y": 92.15131805222342 + }, + "data": { + "id": "llmAgentflow_1", + "label": "General Answer", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_1-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"llmAgentflow_1-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_1-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_1-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_1-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_1-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_1-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_1-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + 
"topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_1-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 165.82871786911647, + "y": 92.15131805222342 + }, + "dragging": false + }, + { + "id": "retrieverAgentflow_0", + "position": { + "x": 396.87575963946966, + "y": -17.41189617164227 + }, + "data": { + "id": "retrieverAgentflow_0", + "label": "Retriever Vector DB", + "version": 1, + "name": "retrieverAgentflow", + "type": "Retriever", + "color": "#b8bedd", + "baseClasses": ["Retriever"], + "category": "Agent Flows", + "description": "Retrieve information from vector database", + "inputParams": [ + { + "label": "Knowledge (Document Stores)", + "name": "retrieverKnowledgeDocumentStores", + "type": "array", + "description": "Document stores to retrieve information from. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + } + ], + "id": "retrieverAgentflow_0-input-retrieverKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Retriever Query", + "name": "retrieverQuery", + "type": "string", + "placeholder": "Enter your query here", + "rows": 4, + "acceptVariable": true, + "id": "retrieverAgentflow_0-input-retrieverQuery-string", + "display": true + }, + { + "label": "Output Format", + "name": "outputFormat", + "type": "options", + "options": [ + { + "label": "Text", + "name": "text" + }, + { + "label": "Text with Metadata", + "name": "textWithMetadata" + } + ], + "default": "text", + "id": "retrieverAgentflow_0-input-outputFormat-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "retrieverUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "retrieverAgentflow_0-input-retrieverUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "retrieverKnowledgeDocumentStores": [ + { + "documentStore": "570df92b-087b-4d3b-9462-7a11283454a5:ai paper" + } + ], + "retrieverQuery": "{{ $flow.state.query }}
", + "outputFormat": "text", + "retrieverUpdateState": "" + }, + "outputAnchors": [ + { + "id": "retrieverAgentflow_0-output-retrieverAgentflow", + "label": "Retriever", + "name": "retrieverAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 190, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 396.87575963946966, + "y": -17.41189617164227 + }, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_1", + "position": { + "x": 647.9586712853835, + "y": -24.93225611691784 + }, + "data": { + "id": "conditionAgentAgentflow_1", + "label": "Check if docs relevant", + "version": 1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": ["ConditionAgent"], + "category": "Agent Flows", + "description": "Utilize an agent to split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_1-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_1-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "{{ question }}
", + "id": "conditionAgentAgentflow_1-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "Relevant" + }, + { + "scenario": "Irrelevant" + } + ], + "id": "conditionAgentAgentflow_1-input-conditionAgentScenarios-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatOpenAI", + "conditionAgentInstructions": "Determine if the document is relevant to user question. User question is {{ question }}
", + "conditionAgentInput": "{{ retrieverAgentflow_0 }}
", + "conditionAgentScenarios": [ + { + "scenario": "Relevant" + }, + { + "scenario": "Irrelevant" + } + ], + "conditionAgentModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "conditionAgentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_1-output-0", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + }, + { + "id": "conditionAgentAgentflow_1-output-1", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 206, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": 647.9586712853835, + "y": -24.93225611691784 + }, + "dragging": false + }, + { + "id": "llmAgentflow_2", + "position": { + "x": 920.5416793343077, + "y": -75.82606372993476 + }, + "data": { + "id": "llmAgentflow_2", + "label": "Generate Response", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_2-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": 
"Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_2-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_2-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_2-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_2-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_2-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_2-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_2-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_2-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_2-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": "", + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "Given the question: {{ question }}
And the findings: {{ retrieverAgentflow_0 }}
Output the final response
", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_2-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 190, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 920.5416793343077, + "y": -75.82606372993476 + }, + "dragging": false + }, + { + "id": "llmAgentflow_3", + "position": { + "x": 921.1014768144131, + "y": 26.898902739007895 + }, + "data": { + "id": "llmAgentflow_3", + "label": "Regenerate Question", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_3-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + 
"acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_3-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_3-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_3-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_3-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_3-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_3-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_3-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_3-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_3-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "You are a helpful assistant that can transform the query to produce a better question.
" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "Look at the input and try to reason about the underlying semantic intent / meaning.
Here is the initial question:
{{ $flow.state.query }}
Formulate an improved question:
", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": [ + { + "key": "query", + "value": "{{ output }}
" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_3-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 199, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 921.1014768144131, + "y": 26.898902739007895 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 1160.0553838519766, + "y": 30.06685001229809 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Loop back to Retriever", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": ["Loop"], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "retrieverAgentflow_0-Retriever Vector DB", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 208, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 1160.0553838519766, + "y": 30.06685001229809 + }, + "dragging": false + }, + { + "id": 
"stickyNoteAgentflow_0", + "position": { + "x": 145.5705985486235, + "y": -116.29641765720946 + }, + "data": { + "id": "stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "First update of the state.query" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + "height": 81, + "selected": false, + "positionAbsolute": { + "x": 145.5705985486235, + "y": -116.29641765720946 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_1", + "position": { + "x": 923.4413972289242, + "y": 110.04672879978278 + }, + "data": { + "id": "stickyNoteAgentflow_1", + "label": "Sticky Note (1)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_1-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Second update of state.query" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_1-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + 
"height": 81, + "selected": false, + "positionAbsolute": { + "x": 923.4413972289242, + "y": 110.04672879978278 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-0", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-0-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-1", + "target": "llmAgentflow_1", + "targetHandle": "llmAgentflow_1", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-1-llmAgentflow_1-llmAgentflow_1" + }, + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "conditionAgentAgentflow_0", + "targetHandle": "conditionAgentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-conditionAgentAgentflow_0-conditionAgentAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "retrieverAgentflow_0", + "targetHandle": "retrieverAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#b8bedd", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-retrieverAgentflow_0-retrieverAgentflow_0" + }, + { + "source": "retrieverAgentflow_0", + "sourceHandle": "retrieverAgentflow_0-output-retrieverAgentflow", + "target": "conditionAgentAgentflow_1", + "targetHandle": 
"conditionAgentAgentflow_1", + "data": { + "sourceColor": "#b8bedd", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "retrieverAgentflow_0-retrieverAgentflow_0-output-retrieverAgentflow-conditionAgentAgentflow_1-conditionAgentAgentflow_1" + }, + { + "source": "llmAgentflow_3", + "sourceHandle": "llmAgentflow_3-output-llmAgentflow", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_3-llmAgentflow_3-output-llmAgentflow-loopAgentflow_0-loopAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_1", + "sourceHandle": "conditionAgentAgentflow_1-output-1", + "target": "llmAgentflow_3", + "targetHandle": "llmAgentflow_3", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-1-llmAgentflow_3-llmAgentflow_3" + }, + { + "source": "conditionAgentAgentflow_1", + "sourceHandle": "conditionAgentAgentflow_1-output-0", + "target": "llmAgentflow_2", + "targetHandle": "llmAgentflow_2", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-0-llmAgentflow_2-llmAgentflow_2" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Agents Handoff.json b/packages/server/marketplaces/agentflowsv2/Agents Handoff.json new file mode 100644 index 000000000..72b4da969 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Agents Handoff.json @@ -0,0 +1,1474 @@ +{ + "description": "A customer support agent that can handoff tasks to different agents based on scenarios", + "usecases": ["Customer Support"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + 
"position": { + "x": -162.58207424380598, + "y": 117.81335679543406 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + 
"placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -162.58207424380598, + "y": 117.81335679543406 + }, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_0", + "position": { + "x": -11.580228601760105, + "y": 99.42548336780041 + }, + "data": { + "id": "conditionAgentAgentflow_0", + "label": "Detect User Intention", + "version": 1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": ["ConditionAgent"], + "category": "Agent Flows", + "description": "Utilize an agent to 
split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_0-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_0-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "{{ question }}
", + "id": "conditionAgentAgentflow_0-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "User is asking for refund" + }, + { + "scenario": "User is looking for item" + } + ], + "id": "conditionAgentAgentflow_0-input-conditionAgentScenarios-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatOpenAI", + "conditionAgentInstructions": "You are a customer support agent for ACME Inc.
Follow the following routine with the user:
1. First, greet the user and see how you can help the user
2. If user is looking for items, handoff to the Sales Agent
3. If user is looking for refund, handoff to Refund Agent
4. If user is asking general query, be helpful and answer the query
Note: Transfers between agents are handled seamlessly in the background; do not mention or draw attention to these transfers in your conversation with the user
", + "conditionAgentInput": "{{ question }}
", + "conditionAgentScenarios": [ + { + "scenario": "User is asking for refund" + }, + { + "scenario": "User is looking for item" + }, + { + "scenario": "User is chatting casually or asking general question" + } + ], + "conditionAgentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": true, + "reasoningEffort": "medium", + "conditionAgentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_0-output-0", + "label": 0, + "name": 0, + "description": "Condition 0" + }, + { + "id": "conditionAgentAgentflow_0-output-1", + "label": 1, + "name": 1, + "description": "Condition 1" + }, + { + "id": "conditionAgentAgentflow_0-output-2", + "label": 2, + "name": 2, + "description": "Condition 2" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 200, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": -11.580228601760105, + "y": 99.42548336780041 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 253.4811075082052, + "y": 17.0330403645183 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Refund Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + 
"optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": 
"agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatGoogleGenerativeAI", + "agentMessages": [ + { + "role": "system", + "content": "You are a refund agent. Help the user with refunds.
" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gemini-2.0-flash", + "customModelName": "", + "temperature": 0.9, + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "allowImageUploads": "", + "agentModel": "chatGoogleGenerativeAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 191, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 253.4811075082052, + "y": 17.0330403645183 + }, + "dragging": false + }, + { + "id": "agentAgentflow_1", + "position": { + "x": 253.74384888466125, + "y": 113.94007038630222 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Sales Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + 
"name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": 
"asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatAnthropic", + "agentMessages": [ + { + "role": "system", + "content": "You are a sales assistant. Help user search for the product.
" + } + ], + "agentTools": [ + { + "agentSelectedTool": "googleCustomSearch", + "agentSelectedToolConfig": { + "agentSelectedTool": "googleCustomSearch" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "claude-3-7-sonnet-latest", + "temperature": 0.9, + "streaming": true, + "maxTokensToSample": "", + "topP": "", + "topK": "", + "extendedThinking": "", + "budgetTokens": 1024, + "allowImageUploads": "", + "agentModel": "chatAnthropic" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 231, + "height": 103, + "selected": false, + "positionAbsolute": { + "x": 253.74384888466125, + "y": 113.94007038630222 + }, + "dragging": false + }, + { + "id": "agentAgentflow_2", + "position": { + "x": 250.2139715995238, + "y": 234.20808458654034 + }, + "data": { + "id": "agentAgentflow_2", + "label": "General Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_2-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": 
"Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_2-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_2-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": 
"listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_2-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_2-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_2-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_2-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_2-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "groqChat", + "agentMessages": [ + { + "role": "system", + "content": "You are helpful assistant
" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "llama-3.2-3b-preview", + "temperature": 0.9, + "streaming": true, + "agentModel": "groqChat" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_2-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 214, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 250.2139715995238, + "y": 234.20808458654034 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_0", + "position": { + "x": 246.81594867785896, + "y": -103.07943752447065 + }, + "data": { + "id": "stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "We can improve this by adding necessary tools for agents" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + "height": 101, + "selected": false, + "positionAbsolute": { + "x": 246.81594867785896, + "y": -103.07943752447065 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": 
"conditionAgentAgentflow_0", + "targetHandle": "conditionAgentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-conditionAgentAgentflow_0-conditionAgentAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-0", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-0-agentAgentflow_0-agentAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-1", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-1-agentAgentflow_1-agentAgentflow_1" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-2", + "target": "agentAgentflow_2", + "targetHandle": "agentAgentflow_2", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "2", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-2-agentAgentflow_2-agentAgentflow_2" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Deep Research V2.json b/packages/server/marketplaces/agentflowsv2/Deep Research V2.json new file mode 100644 index 000000000..bd58656b8 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Deep Research V2.json @@ -0,0 +1,2142 @@ +{ + "description": "An agent capable of performing research, synthesizing information, and generating in-depth, 
well-structured white papers on any given topic", + "usecases": ["Agent"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -275.0799323960054, + "y": 31.301887150099603 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", 
+ "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true, + "id": "startAgentflow_0-input-startEphemeralMemory-boolean", + "display": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar", + "optional": true + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "startEphemeralMemory": true, + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 103, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -275.0799323960054, + "y": 31.301887150099603 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": -59.13383952997965, + "y": 28.495983624910906 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Topic Enhancer", + "version": 1, + "name": 
"llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": false + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": false + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String 
Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + 
"inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "developer", + "content": "Your only role is to improve the user query for more clarity. Do not add any meta comments.
" + } + ], + "llmEnableMemory": false, + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": "0.5", + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 175, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": -59.13383952997965, + "y": 28.495983624910906 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 209.99147630894493, + "y": 100.7933285478893 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Agent 0", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": 
"Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": 
"asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatGoogleGenerativeAI", + "agentMessages": [ + { + "role": "system", + "content": "You are Agent 0. Your goal is to explore any topic provided by the user in depth with Agent 1.
Start: Introduce the topic to Agent 1. Share your initial thoughts and any assumptions you have.
Research & Share:
Use BraveSearch API to find a range of information and different viewpoints on the topic. Look for URLs that seem promising for more detail.
If a URL from BraveSearch API (or one you already know) seems particularly important, use the Web Scraper Tool to get its full content.
Present what you find to Agent 1, especially any complexities, counter-arguments, or conflicting data.
Clearly state your sources:
\"BraveSearch API found...\"
\"After scraping [URL], the content shows...\"
Discuss & Deepen:
Listen to Agent 1. Ask probing questions.
If needed, use your tools again (BraveSearch API to find more, Web Scraper to analyze a specific page) during the conversation to verify points or explore new angles.
Mindset: Be curious, analytical, and open to different perspectives. Aim for a thorough understanding, not just agreement.
You are Agent 1. Your goal is to explore a topic in depth with Agent 0.
Respond & Share:
Acknowledge the topic Agent 0 introduces.
Share your own thoughts and feelings, building on or respectfully challenging Agent 0's points. Consider your own assumptions.
Research & Contribute:
Use BraveSearch API to research the topic, especially looking for different perspectives, counter-arguments, or aspects Agent 0 might not have covered. Identify URLs that seem promising for more detail.
If a URL from BraveSearch API (or one you already know) seems particularly important for your point or for adding nuance, use the Web Scraper Tool to get its full content.
Present your findings, especially any that introduce new angles, conflicts, or alternative views.
Clearly state your sources:
\"My BraveSearch API tool found...\"
\"After scraping [URL], the content suggests...\"
If you find conflicting info from different sources, point this out.
Discuss & Deepen:
Listen carefully to Agent 0. Ask clarifying questions and questions that challenge their reasoning or explore alternatives.
If needed, use your tools again (BraveSearch API to find more, Web Scraper to analyze a specific page) during the conversation to support your points or investigate Agent 0's claims.
Mindset: Be respectful, analytical, and open to different viewpoints. Aim for a thorough exploration and constructive disagreement, backed by research.
{{ runtime_messages_length }}
", + "operation": "smallerEqual", + "value2": "11
" + } + ] + }, + "outputAnchors": [ + { + "id": "conditionAgentflow_0-output-0", + "label": "Condition", + "name": "conditionAgentflow" + }, + { + "id": "conditionAgentflow_0-output-1", + "label": "Condition", + "name": "conditionAgentflow" + } + ], + "outputs": { + "conditionAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 134, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": 497.07879661792845, + "y": 29.068421396935392 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 710.6354115635097, + "y": -61.015932400168076 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Loop", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": ["Loop"], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "agentAgentflow_0-Agent 0", + "maxLoopCount": "10" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 104, + "height": 65, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 710.6354115635097, + "y": -61.015932400168076 + } + }, + { + "id": "llmAgentflow_1", + "position": { + "x": 693.0529196789191, + "y": 133.0683091126315 + }, + "data": { + "id": "llmAgentflow_1", + "label": "Agent 2", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to 
analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_1-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_1-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_1-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmMemoryType-options", + "display": false + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_1-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_1-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmUserMessage-string", + "display": false + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_1-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String 
Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_1-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_1-input-llmUpdateState-array", + "display": true + } + ], + 
"inputAnchors": [], + "inputs": { + "llmModel": "chatGoogleGenerativeAI", + "llmMessages": [ + { + "role": "system", + "content": "You are Agent 2. Your role is to transform the deep conversation between Agent 0 and Agent 1 into a comprehensive and extensive white paper on the subject they discussed.
Your goal is to produce an authoritative document that not only captures the essence of their dialogue but also expands upon it, providing a thorough exploration of the topic. This white paper should be suitable for an audience seeking a deep understanding of the subject.
The white paper must include, but is not limited to, the following sections and considerations:
Title: A clear, compelling title for the white paper that reflects the core subject.
Abstract/Executive Summary: A concise overview (approx. 200-300 words) of the white paper's main arguments, scope, and conclusions, derived from the conversation.
Introduction:
Set the context and importance of the subject discussed by Agent 0 and Agent 1.
Clearly define the central problem, question, or theme that the white paper will address, based on their dialogue.
Outline the paper's structure and objectives.
Main Body / Thematic Analysis (Multiple Sections):
Deconstruct and Synthesize Key Arguments: Detail the principal arguments, propositions, and evidence presented by both Agent 0 and Agent 1. Go beyond mere listing; analyze the strengths, weaknesses, and underlying assumptions of their positions.
Explore Core Themes and Concepts: Identify and elaborate on the major themes and concepts that emerged. For each theme, discuss how Agent 0 and Agent 1 approached it, their points of convergence, and their points of divergence.
Analyze the Evolution of the Discussion: Trace how the understanding of the subject evolved throughout their conversation. Highlight any shifts in perspective, critical turning points, challenged assumptions, or moments of significant clarification.
Evidence and Examples: Where the agents provided examples or evidence, incorporate and potentially expand upon these to support the white paper's analysis.
Synthesis of Insights and Key Conclusions:
Draw together the most significant insights and conclusions that can be derived from the entirety of the conversation.
This section should offer a consolidated understanding of the subject, informed by the agents' interaction.
Implications and Future Directions:
Discuss the broader implications of the insights and conclusions reached.
Identify any unresolved questions, ambiguities, or areas that the conversation indicated require further exploration or research.
Suggest potential next steps or future avenues of inquiry.
Conclusion: A strong concluding section summarizing the white paper's main findings, their significance, and a final thought on the subject.
Style and Tone:
Extensive and In-depth: The paper should be thorough and detailed.
Well-Structured: Use clear headings, subheadings, and logical flow.
Analytical and Critical: Do not just report; analyze, interpret, and critically engage with the agents' ideas.
Objective and Authoritative: While based on the agents' dialogue, the white paper should present a balanced and well-reasoned perspective.
Clear Attribution: When discussing specific viewpoints or arguments, clearly attribute them to Agent 0 or Agent 1.
Formal and Professional Language: Maintain a tone appropriate for a white paper.
Your primary source material is the conversation between Agent 0 and Agent 1. Your task is to elevate their discourse into a structured, analytical, and extensive white paper.
" + }, + { + "role": "user", + "content": "Here is the full conversation between Agent 0 and Agent 1. Please use this as the primary source material for generating the extensive white paper as per your instructions:
--
{{ chat_history }}
--
You are a customer support agent working in Flowise Inc. Write a professional email reply to user's query. Use the web search tools to get more details about the prospect.
" + } + ], + "agentTools": [ + { + "agentSelectedTool": "googleCustomSearch", + "agentSelectedToolConfig": { + "agentSelectedTool": "googleCustomSearch" + } + }, + { + "agentSelectedTool": "currentDateTime", + "agentSelectedToolConfig": { + "agentSelectedTool": "currentDateTime" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 182, + "height": 103, + "selected": false, + "positionAbsolute": { + "x": -62.25, + "y": 76 + }, + "dragging": false + }, + { + "id": "humanInputAgentflow_0", + "position": { + "x": 156.05666363734434, + "y": 86.62266545493773 + }, + "data": { + "id": "humanInputAgentflow_0", + "label": "Human Input 0", + "version": 1, + "name": "humanInputAgentflow", + "type": "HumanInput", + "color": "#6E6EFD", + "baseClasses": ["HumanInput"], + "category": "Agent Flows", + "description": "Request human input, approval or rejection during execution", + "inputParams": [ + { + "label": "Description Type", + "name": "humanInputDescriptionType", + "type": "options", + "options": [ + { + "label": "Fixed", + "name": "fixed", + "description": "Specify a fixed description" + }, + { + "label": "Dynamic", + "name": "dynamic", + "description": "Use LLM to generate a 
description" + } + ], + "id": "humanInputAgentflow_0-input-humanInputDescriptionType-options", + "display": true + }, + { + "label": "Description", + "name": "humanInputDescription", + "type": "string", + "placeholder": "Are you sure you want to proceed?", + "acceptVariable": true, + "rows": 4, + "show": { + "humanInputDescriptionType": "fixed" + }, + "id": "humanInputAgentflow_0-input-humanInputDescription-string", + "display": true + }, + { + "label": "Model", + "name": "humanInputModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "show": { + "humanInputDescriptionType": "dynamic" + }, + "id": "humanInputAgentflow_0-input-humanInputModel-asyncOptions", + "display": false + }, + { + "label": "Prompt", + "name": "humanInputModelPrompt", + "type": "string", + "default": "Summarize the conversation between the user and the assistant, reiterate the last message from the assistant, and ask if user would like to proceed or if they have any feedback.
\nAre you sure you want to proceed?
" + }, + "outputAnchors": [ + { + "id": "humanInputAgentflow_0-output-0", + "label": "Human Input", + "name": "humanInputAgentflow" + }, + { + "id": "humanInputAgentflow_0-output-1", + "label": "Human Input", + "name": "humanInputAgentflow" + } + ], + "outputs": { + "humanInputAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 161, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": 156.05666363734434, + "y": 86.62266545493773 + }, + "dragging": false + }, + { + "id": "directReplyAgentflow_0", + "position": { + "x": 363.0101864947954, + "y": 35.15053748988734 + }, + "data": { + "id": "directReplyAgentflow_0", + "label": "Direct Reply 0", + "version": 1, + "name": "directReplyAgentflow", + "type": "DirectReply", + "color": "#4DDBBB", + "hideOutput": true, + "baseClasses": ["DirectReply"], + "category": "Agent Flows", + "description": "Directly reply to the user with a message", + "inputParams": [ + { + "label": "Message", + "name": "directReplyMessage", + "type": "string", + "rows": 4, + "acceptVariable": true, + "id": "directReplyAgentflow_0-input-directReplyMessage-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "directReplyMessage": "{{ agentAgentflow_0 }}
" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 155, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 363.0101864947954, + "y": 35.15053748988734 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 366.5975521223236, + "y": 130.12266545493773 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Loop 0", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": ["Loop"], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "agentAgentflow_0-Email Reply Agent", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 113, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 366.5975521223236, + "y": 130.12266545493773 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-agentAgentflow_0-agentAgentflow_0" + }, + { + "source": "agentAgentflow_0", + "sourceHandle": "agentAgentflow_0-output-agentAgentflow", + "target": "humanInputAgentflow_0", + "targetHandle": "humanInputAgentflow_0", + 
"data": { + "sourceColor": "#4DD0E1", + "targetColor": "#6E6EFD", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_0-agentAgentflow_0-output-agentAgentflow-humanInputAgentflow_0-humanInputAgentflow_0" + }, + { + "source": "humanInputAgentflow_0", + "sourceHandle": "humanInputAgentflow_0-output-0", + "target": "directReplyAgentflow_0", + "targetHandle": "directReplyAgentflow_0", + "data": { + "sourceColor": "#6E6EFD", + "targetColor": "#4DDBBB", + "edgeLabel": "proceed", + "isHumanInput": true + }, + "type": "agentFlow", + "id": "humanInputAgentflow_0-humanInputAgentflow_0-output-0-directReplyAgentflow_0-directReplyAgentflow_0" + }, + { + "source": "humanInputAgentflow_0", + "sourceHandle": "humanInputAgentflow_0-output-1", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#6E6EFD", + "targetColor": "#FFA07A", + "edgeLabel": "reject", + "isHumanInput": true + }, + "type": "agentFlow", + "id": "humanInputAgentflow_0-humanInputAgentflow_0-output-1-loopAgentflow_0-loopAgentflow_0" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Financial Research Agent.json b/packages/server/marketplaces/agentflowsv2/Financial Research Agent.json new file mode 100644 index 000000000..f72312e75 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Financial Research Agent.json @@ -0,0 +1,1503 @@ +{ + "description": "A financial research agent that takes in a query, plan the steps, search the web, and return a detailed report", + "usecases": ["Finance & Accounting"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -234.94624728418063, + "y": 84.92919739582129 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + 
"inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": true + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": true + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." 
+ }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": true + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "formInput", + "formTitle": "Financial Research", + "formDescription": "A financial research agent that takes in a query, and return a detailed report", + "formInputTypes": [ + { + "type": "string", + "label": "Query", + "name": "query", + "addOptions": "" + } + ], + "startState": [ + { + "key": "search_key_reason", + "value": "" + } + ] + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -234.94624728418063, + "y": 84.92919739582129 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": -92.42002168895628, + "y": 81.69973969492588 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Planner", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large 
language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String 
Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + 
"inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "You are a financial research planner. Given a request for financial analysis, produce a set of web searches to gather the context needed. Aim for recent headlines, earnings calls or 10‑K snippets, analyst commentary, and industry background. Output between 1 and 2 search terms to query for.
" + }, + { + "role": "user", + "content": "Query:
{{ $form.query }}
" + } + ], + "llmEnableMemory": true, + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": [ + { + "key": "searches", + "type": "jsonArray", + "enumValues": "", + "jsonSchema": "{\n \"query\": {\n \"type\": \"string\",\n \"description\": \"The search term to feed into a web (or file) search.\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Your reasoning for why this search is relevant.\"\n }\n}", + "description": "A list of searches to perform" + } + ], + "llmUpdateState": [ + { + "key": "search_key_reason", + "value": "{{ output.searches }}
" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + }, + "llmUserMessage": "" + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": -92.42002168895628, + "y": 81.69973969492588 + }, + "dragging": false + }, + { + "id": "iterationAgentflow_0", + "position": { + "x": 122.70987564816664, + "y": -7.337791594648152 + }, + "data": { + "id": "iterationAgentflow_0", + "label": "Iteration 0", + "version": 1, + "name": "iterationAgentflow", + "type": "Iteration", + "color": "#9C89B8", + "baseClasses": ["Iteration"], + "category": "Agent Flows", + "description": "Execute the nodes within the iteration block through N iterations", + "inputParams": [ + { + "label": "Array Input", + "name": "iterationInput", + "type": "string", + "description": "The input array to iterate over", + "acceptVariable": true, + "rows": 4, + "id": "iterationAgentflow_0-input-iterationInput-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "iterationInput": "{{ $flow.state.search_key_reason }}
" + }, + "outputAnchors": [ + { + "id": "iterationAgentflow_0-output-iterationAgentflow", + "label": "Iteration", + "name": "iterationAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "iteration", + "width": 300, + "height": 250, + "selected": false, + "positionAbsolute": { + "x": 122.70987564816664, + "y": -7.337791594648152 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 67.5, + "y": 80.5 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Search Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": 
true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": 
"Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "You are a research assistant specializing in financial topics. Given a search term, use web search to retrieve up‑to‑date context and produce a short summary of at most 300 words. Focus on key numbers, events, or quotes that will be useful to a financial analyst.
" + } + ], + "agentTools": [ + { + "agentSelectedTool": "googleCustomSearch", + "agentSelectedToolConfig": { + "agentSelectedTool": "googleCustomSearch" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "Search term: {{$iteration.query}}
Reason: {{$iteration.reason}}
", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "parentNode": "iterationAgentflow_0", + "extent": "parent", + "width": 168, + "height": 103, + "selected": false, + "positionAbsolute": { + "x": 190.20987564816664, + "y": 73.16220840535185 + }, + "dragging": false + }, + { + "id": "agentAgentflow_1", + "position": { + "x": 461.76351005035474, + "y": 81.71183989476083 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Writer Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { 
+ "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", 
+ "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "You are a senior financial analyst. You will be provided with the original query and a set of raw search summaries. Your task is to synthesize these into a long‑form markdown report (at least several paragraphs) including a short executive summary and follow‑up questions
" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "Original query: {{ $form.query }}
Summarized search results: {{ iterationAgentflow_0 }}
", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 461.76351005035474, + "y": 81.71183989476083 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_0", + "position": { + "x": 214.77714507955716, + "y": -165.2444952661696 + }, + "data": { + "id": "stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Search Agent will iterate through the search terms and search the web using tool" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + "height": 142, + "selected": false, + "positionAbsolute": { + "x": 214.77714507955716, + "y": -165.2444952661696 + }, + "dragging": false + }, + { 
+ "id": "stickyNoteAgentflow_1", + "position": { + "x": -100.05436009717414, + "y": -45.56902388417101 + }, + "data": { + "id": "stickyNoteAgentflow_1", + "label": "Sticky Note (1)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_1-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Planner will generate list of search terms to query for" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_1-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + "height": 101, + "selected": false, + "positionAbsolute": { + "x": -100.05436009717414, + "y": -45.56902388417101 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_2", + "position": { + "x": 457.98399139175314, + "y": -35.19227767879839 + }, + "data": { + "id": "stickyNoteAgentflow_2", + "label": "Sticky Note (2)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_2-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Generate the final report from the search results" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_2-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + 
"selected": false + }, + "type": "stickyNote", + "width": 189, + "height": 101, + "selected": false, + "positionAbsolute": { + "x": 457.98399139175314, + "y": -35.19227767879839 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "iterationAgentflow_0", + "targetHandle": "iterationAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#9C89B8", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-iterationAgentflow_0-iterationAgentflow_0" + }, + { + "source": "iterationAgentflow_0", + "sourceHandle": "iterationAgentflow_0-output-iterationAgentflow", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + "sourceColor": "#9C89B8", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "iterationAgentflow_0-iterationAgentflow_0-output-iterationAgentflow-agentAgentflow_1-agentAgentflow_1" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Iterations.json b/packages/server/marketplaces/agentflowsv2/Iterations.json new file mode 100644 index 000000000..b33dd1a54 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Iterations.json @@ -0,0 +1,1278 @@ +{ + "description": "An agent that can iterate over a list of items and perform actions on each item", + "usecases": ["Agent"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -157.7434917749852, + "y": 100.77695246750446 + }, + "data": { + "id": 
"startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name 
must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -157.7434917749852, + "y": 100.77695246750446 + }, + "dragging": false + }, + { + "id": "iterationAgentflow_0", + "position": { + "x": -13.75, + "y": 8.5 + }, + "data": { + "id": "iterationAgentflow_0", + "label": "Iteration 0", + "version": 1, + "name": "iterationAgentflow", + "type": "Iteration", + "color": "#9C89B8", + "baseClasses": ["Iteration"], + "category": "Agent Flows", + "description": "Execute the nodes within the iteration block through N iterations", + "inputParams": [ + { + "label": "Array Input", + "name": "iterationInput", + "type": "string", + "description": 
"The input array to iterate over", + "acceptVariable": true, + "rows": 4, + "id": "iterationAgentflow_0-input-iterationInput-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "iterationInput": "[{\"item\": \"abc\"}, {\"item\": \"def\"}]
" + }, + "outputAnchors": [ + { + "id": "iterationAgentflow_0-output-iterationAgentflow", + "label": "Iteration", + "name": "iterationAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "iteration", + "width": 481, + "height": 250, + "selected": false, + "positionAbsolute": { + "x": -13.75, + "y": 8.5 + }, + "dragging": false, + "style": { + "width": 481, + "height": 250 + }, + "resizing": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": 56, + "y": 92 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Gemini Agent", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + 
"options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatGoogleGenerativeAI", + "llmMessages": "", + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "Reply only:
{{$iteration.item}}
", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "credential": "", + "modelName": "gemini-2.0-flash", + "customModelName": "", + "temperature": 0.9, + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "allowImageUploads": "", + "llmModel": "chatGoogleGenerativeAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "parentNode": "iterationAgentflow_0", + "extent": "parent", + "width": 191, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 42.25, + "y": 100.5 + }, + "dragging": false + }, + { + "id": "llmAgentflow_1", + "position": { + "x": 287.9621736478904, + "y": 92.25785828325522 + }, + "data": { + "id": "llmAgentflow_1", + "label": "Ollama Agent", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_1-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"llmAgentflow_1-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_1-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_1-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_1-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_1-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_1-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_1-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOllama", + "llmMessages": "", + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "Reply only:
{{$iteration.item}}
", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "baseUrl": "http://localhost:11434", + "modelName": "llama3.2", + "temperature": 0.9, + "allowImageUploads": "", + "streaming": true, + "jsonMode": "", + "keepAlive": "5m", + "topP": "", + "topK": "", + "mirostat": "", + "mirostatEta": "", + "mirostatTau": "", + "numCtx": "", + "numGpu": "", + "numThread": "", + "repeatLastN": "", + "repeatPenalty": "", + "stop": "", + "tfsZ": "", + "llmModel": "chatOllama" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_1-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "parentNode": "iterationAgentflow_0", + "extent": "parent", + "width": 154, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 274.2121736478904, + "y": 100.75785828325522 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 509.27738295829977, + "y": 97.28505776122253 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + 
] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": 
"embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": "", + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 509.27738295829977, + "y": 97.28505776122253 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "iterationAgentflow_0", + "targetHandle": "iterationAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#9C89B8", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-iterationAgentflow_0-iterationAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "llmAgentflow_1", + "targetHandle": "llmAgentflow_1", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "zIndex": 9999, + "type": "agentFlow", + "id": 
"llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-llmAgentflow_1-llmAgentflow_1" + }, + { + "source": "iterationAgentflow_0", + "sourceHandle": "iterationAgentflow_0-output-iterationAgentflow", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#9C89B8", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "iterationAgentflow_0-iterationAgentflow_0-output-iterationAgentflow-agentAgentflow_0-agentAgentflow_0" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Slack Agent.json b/packages/server/marketplaces/agentflowsv2/Slack Agent.json new file mode 100644 index 000000000..cd30db646 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Slack Agent.json @@ -0,0 +1,718 @@ +{ + "description": "An agent that can post message to Slack channel", + "usecases": ["Agent"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -192.5, + "y": 68 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + 
"name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." 
+ }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -192.5, + "y": 68 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": -31.25, + "y": 64.5 + }, + "data": { + "id": "llmAgentflow_0", + "label": "General Agent", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + 
}, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String 
Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + 
"inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": "", + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": -31.25, + "y": 64.5 + }, + "dragging": false + }, + { + "id": "toolAgentflow_0", + "position": { + "x": 182.75, + "y": 64.5 + }, + "data": { + "id": "toolAgentflow_0", + "label": "Slack Reply", + "version": 1, + "name": "toolAgentflow", + "type": "Tool", + "color": "#d4a373", + "baseClasses": ["Tool"], + "category": "Agent Flows", + "description": "Tools allow LLM to interact with external systems", + "inputParams": [ + { + "label": "Tool", + "name": "selectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true, + "id": "toolAgentflow_0-input-selectedTool-asyncOptions", + "display": true + }, + { + "label": "Tool Input Arguments", + "name": "toolInputArgs", + "type": "array", + "acceptVariable": true, + "refresh": true, + "array": [ + { + "label": "Input Argument Name", + "name": "inputArgName", + "type": "asyncOptions", + "loadMethod": "listToolInputArgs", + "refresh": true + }, + { + "label": "Input Argument Value", + "name": "inputArgValue", + "type": "string", + 
"acceptVariable": true + } + ], + "show": { + "selectedTool": ".+" + }, + "id": "toolAgentflow_0-input-toolInputArgs-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "toolUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "toolAgentflow_0-input-toolUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "selectedTool": "slackMCP", + "toolInputArgs": [ + { + "inputArgName": "channel_id", + "inputArgValue": "ABCDEFG
" + }, + { + "inputArgName": "text", + "inputArgValue": "{{ llmAgentflow_0 }}
" + } + ], + "toolUpdateState": "", + "selectedToolConfig": { + "mcpActions": "[\"slack_post_message\"]", + "selectedTool": "slackMCP" + } + }, + "outputAnchors": [ + { + "id": "toolAgentflow_0-output-toolAgentflow", + "label": "Tool", + "name": "toolAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 142, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 182.75, + "y": 64.5 + }, + "dragging": false + }, + { + "id": "directReplyAgentflow_0", + "position": { + "x": 366.75, + "y": 67.5 + }, + "data": { + "id": "directReplyAgentflow_0", + "label": "Direct Reply To Chat", + "version": 1, + "name": "directReplyAgentflow", + "type": "DirectReply", + "color": "#4DDBBB", + "hideOutput": true, + "baseClasses": ["DirectReply"], + "category": "Agent Flows", + "description": "Directly reply to the user with a message", + "inputParams": [ + { + "label": "Message", + "name": "directReplyMessage", + "type": "string", + "rows": 4, + "acceptVariable": true, + "id": "directReplyAgentflow_0-input-directReplyMessage-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "directReplyMessage": "{{ llmAgentflow_0 }}
" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 194, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 366.75, + "y": 67.5 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "toolAgentflow_0", + "targetHandle": "toolAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#d4a373", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-toolAgentflow_0-toolAgentflow_0" + }, + { + "source": "toolAgentflow_0", + "sourceHandle": "toolAgentflow_0-output-toolAgentflow", + "target": "directReplyAgentflow_0", + "targetHandle": "directReplyAgentflow_0", + "data": { + "sourceColor": "#d4a373", + "targetColor": "#4DDBBB", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "toolAgentflow_0-toolAgentflow_0-output-toolAgentflow-directReplyAgentflow_0-directReplyAgentflow_0" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Supervisor Worker.json b/packages/server/marketplaces/agentflowsv2/Supervisor Worker.json new file mode 100644 index 000000000..dbf60b335 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Supervisor Worker.json @@ -0,0 +1,2080 @@ +{ + "description": "A hierarchical supervisor agent that plan the steps, and delegate tasks to worker agents based on user query", + "usecases": ["Hierarchical Agent Teams"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": 
-234.25083179589828, + "y": 89.8928676312403 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name 
for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": [ + { + "key": "next", + "value": "" + }, + { + "key": "instruction", + "value": "" + } + ] + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -234.25083179589828, + "y": 89.8928676312403 + }, + "dragging": false + }, + { + "id": "conditionAgentflow_0", + "position": { + "x": 128.47781848153903, + "y": 73.36847122134466 + }, + "data": { + "id": "conditionAgentflow_0", + "label": "Check next worker", + "version": 1, + "name": "conditionAgentflow", + "type": "Condition", + "color": "#FFB938", + "baseClasses": ["Condition"], + "category": "Agent Flows", + 
"description": "Split flows based on If Else conditions", + "inputParams": [ + { + "label": "Conditions", + "name": "conditions", + "type": "array", + "description": "Values to compare", + "acceptVariable": true, + "default": [ + { + "type": "string", + "value1": "{{ $flow.state.next }}
", + "operation": "equal", + "value2": "SOFTWARE
" + } + ], + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + } + ], + "default": "string" + }, + { + "label": "Value 1", + "name": "value1", + "type": "string", + "default": "", + "description": "First value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "string" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Contains", + "name": "contains" + }, + { + "label": "Ends With", + "name": "endsWith" + }, + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Contains", + "name": "notContains" + }, + { + "label": "Not Equal", + "name": "notEqual" + }, + { + "label": "Regex", + "name": "regex" + }, + { + "label": "Starts With", + "name": "startsWith" + }, + { + "label": "Is Empty", + "name": "isEmpty" + }, + { + "label": "Not Empty", + "name": "notEmpty" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "string" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "string", + "default": "", + "description": "Second value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "string" + }, + "hide": { + "conditions[$index].operation": ["isEmpty", "notEmpty"] + } + }, + { + "label": "Value 1", + "name": "value1", + "type": "number", + "default": "", + "description": "First value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Smaller", + "name": "smaller" + }, + { + "label": "Smaller Equal", + "name": "smallerEqual" + }, + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Equal", + "name": 
"notEqual" + }, + { + "label": "Larger", + "name": "larger" + }, + { + "label": "Larger Equal", + "name": "largerEqual" + }, + { + "label": "Is Empty", + "name": "isEmpty" + }, + { + "label": "Not Empty", + "name": "notEmpty" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "number", + "default": 0, + "description": "Second value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Value 1", + "name": "value1", + "type": "boolean", + "default": false, + "description": "First value to be compared with", + "show": { + "conditions[$index].type": "boolean" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Equal", + "name": "notEqual" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "boolean" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "boolean", + "default": false, + "description": "Second value to be compared with", + "show": { + "conditions[$index].type": "boolean" + } + } + ], + "id": "conditionAgentflow_0-input-conditions-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditions": [ + { + "type": "string", + "value1": "{{ $flow.state.next }}
", + "operation": "equal", + "value2": "SOFTWARE
" + }, + { + "type": "string", + "value1": "{{ $flow.state.next }}
", + "operation": "equal", + "value2": "REVIEWER
" + } + ] + }, + "outputAnchors": [ + { + "id": "conditionAgentflow_0-output-0", + "label": 0, + "name": 0, + "description": "Condition 0" + }, + { + "id": "conditionAgentflow_0-output-1", + "label": 1, + "name": 1, + "description": "Condition 1" + }, + { + "id": "conditionAgentflow_0-output-2", + "label": 2, + "name": 2, + "description": "Else" + } + ], + "outputs": { + "conditionAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 184, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": 128.47781848153903, + "y": 73.36847122134466 + }, + "dragging": false + }, + { + "id": "agentAgentflow_1", + "position": { + "x": 352.5679347768288, + "y": -23.510778245391947 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Software Engineer", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + 
"array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + 
}, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "azureChatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "As a Senior Software Engineer, you are a pivotal part of our innovative development team. Your expertise and leadership drive the creation of robust, scalable software solutions that meet the needs of our diverse clientele. By applying best practices in software development, you ensure that our products are reliable, efficient, and maintainable.
Your goal is to lead the development of high-quality software solutions.
Utilize your deep technical knowledge and experience to architect, design, and implement software systems that address complex problems. Collaborate closely with other engineers, reviewers to ensure that the solutions you develop align with business objectives and user needs.
Design and implement a new feature for the given task, ensuring it integrates seamlessly with existing systems and meets performance requirements. Use your understanding of {technology} to build this feature. Make sure to adhere to our coding standards and follow best practices.
The output should be a fully functional, well-documented feature that enhances our product's capabilities. Include detailed comments in the code. Pass the code to the Quality Assurance Engineer for review if necessary. Once the review is good enough, produce a finalized version of the code.
" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "{{ $flow.state.instruction }}
", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4.1", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 183, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 352.5679347768288, + "y": -23.510778245391947 + }, + "dragging": false + }, + { + "id": "agentAgentflow_2", + "position": { + "x": 358.5981605238689, + "y": 87.38558154725587 + }, + "data": { + "id": "agentAgentflow_2", + "label": "Code Reviewer", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_2-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + 
"rows": 4 + } + ], + "id": "agentAgentflow_2-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_2-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + 
"name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_2-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_2-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_2-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_2-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_2-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatDeepseek", + "agentMessages": [ + { + "role": "system", + "content": "As a Quality Assurance Engineer, you are an integral part of our development team, ensuring that our software products are of the highest quality. Your meticulous attention to detail and expertise in testing methodologies are crucial in identifying defects and ensuring that our code meets the highest standards.
Your goal is to ensure the delivery of high-quality software through thorough code review and testing.
Review the codebase for the new feature designed and implemented by the Senior Software Engineer. Your expertise goes beyond mere code inspection; you are adept at ensuring that developments not only function as intended but also adhere to the team's coding standards, enhance maintainability, and seamlessly integrate with existing systems.
With a deep appreciation for collaborative development, you provide constructive feedback, guiding contributors towards best practices and fostering a culture of continuous improvement. Your meticulous approach to reviewing code, coupled with your ability to foresee potential issues and recommend proactive solutions, ensures the delivery of high-quality software that is robust, scalable, and aligned with the team's strategic goals.
Always pass back the review and feedback to Senior Software Engineer.
" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "{{ $flow.state.instruction }}
", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "deepseek-reasoner", + "temperature": 0.7, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "stopSequence": "", + "baseOptions": "", + "agentModel": "chatDeepseek" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_2-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 206, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 358.5981605238689, + "y": 87.38558154725587 + }, + "dragging": false + }, + { + "id": "agentAgentflow_3", + "position": { + "x": 357.60470406099364, + "y": 192.61532204982643 + }, + "data": { + "id": "agentAgentflow_3", + "label": "Generate Final Answer", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_3-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"agentAgentflow_3-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_3-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + 
"type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_3-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_3-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_3-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_3-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_3-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatAnthropic", + "agentMessages": "", + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "Given the above conversations, generate a detail solution developed by the software engineer and code reviewer. Include full code, improvements and review.
", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "claude-3-7-sonnet-latest", + "temperature": 0.9, + "streaming": true, + "maxTokensToSample": "", + "topP": "", + "topK": "", + "extendedThinking": "", + "budgetTokens": 1024, + "allowImageUploads": "", + "agentModel": "chatAnthropic" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_3-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 231, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 357.60470406099364, + "y": 192.61532204982643 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 574.050701666824, + "y": -20.0960840521807 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Loop to Supervisor", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": ["Loop"], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "llmAgentflow_0-Supervisor", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 186, + "height": 65, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 574.050701666824, + "y": -20.0960840521807 + } + }, + { + "id": "loopAgentflow_1", + "position": { + "x": 600.379151793432, + "y": 90.25732743474846 + }, + 
"data": { + "id": "loopAgentflow_1", + "label": "Loop to Supervisor", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": ["Loop"], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_1-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_1-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "llmAgentflow_0-Supervisor", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 186, + "height": 65, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 600.379151793432, + "y": 90.25732743474846 + } + }, + { + "id": "llmAgentflow_0", + "position": { + "x": -78.28788541792727, + "y": 87.1528514813091 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Supervisor", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": 
"Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "You are a supervisor tasked with managing a conversation between the following workers:
- Software Engineer
- Code Reviewer
Given the following user request, respond with the worker to act next.
Each worker will perform a task and respond with their results and status.
When finished, respond with FINISH.
Select strategically to minimize the number of steps taken.
" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "Given the conversation above, who should act next? Or should we FINISH? Select one of: SOFTWARE, REVIEWER
", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": [ + { + "key": "next", + "type": "enum", + "enumValues": "FINISH, SOFTWARE, REVIEWER", + "jsonSchema": "", + "description": "next worker to act" + }, + { + "key": "instructions", + "type": "string", + "enumValues": "", + "jsonSchema": "", + "description": "The specific instructions of the sub-task the next worker should accomplish." + }, + { + "key": "reasoning", + "type": "string", + "enumValues": "", + "jsonSchema": "", + "description": "The reason why next worker is tasked to do the job" + } + ], + "llmUpdateState": [ + { + "key": "next", + "value": "{{ output.next }}
" + }, + { + "key": "instruction", + "value": "{{ output.instructions }}
" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": -78.28788541792727, + "y": 87.1528514813091 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "conditionAgentflow_0", + "targetHandle": "conditionAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#FFB938", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-conditionAgentflow_0-conditionAgentflow_0" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-0", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-0-agentAgentflow_1-agentAgentflow_1" + }, + { + 
"source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-1", + "target": "agentAgentflow_2", + "targetHandle": "agentAgentflow_2", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-1-agentAgentflow_2-agentAgentflow_2" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-2", + "target": "agentAgentflow_3", + "targetHandle": "agentAgentflow_3", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "2", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-2-agentAgentflow_3-agentAgentflow_3" + }, + { + "source": "agentAgentflow_1", + "sourceHandle": "agentAgentflow_1-output-agentAgentflow", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_1-agentAgentflow_1-output-agentAgentflow-loopAgentflow_0-loopAgentflow_0" + }, + { + "source": "agentAgentflow_2", + "sourceHandle": "agentAgentflow_2-output-agentAgentflow", + "target": "loopAgentflow_1", + "targetHandle": "loopAgentflow_1", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_2-agentAgentflow_2-output-agentAgentflow-loopAgentflow_1-loopAgentflow_1" + } + ] +} diff --git a/packages/server/marketplaces/chatflows/API Agent.json b/packages/server/marketplaces/chatflows/API Agent.json index 154fd3caa..5d3084bec 100644 --- a/packages/server/marketplaces/chatflows/API Agent.json +++ b/packages/server/marketplaces/chatflows/API Agent.json @@ -4,175 +4,116 @@ "usecases": ["Interacting with API"], "nodes": [ { - "width": 300, - "height": 460, - "id": "getApiChain_0", + "id": 
"toolAgent_0", "position": { - "x": 1222.6923202234623, - "y": 359.97676456347756 + "x": 2142.702888476286, + "y": 52.064582962824204 }, "type": "customNode", "data": { - "id": "getApiChain_0", - "label": "GET API Chain", - "version": 1, - "name": "getApiChain", - "type": "GETApiChain", - "baseClasses": ["GETApiChain", "BaseChain", "BaseLangChain"], - "category": "Chains", - "description": "Chain to run queries against GET API", + "id": "toolAgent_0", + "label": "Tool Agent", + "version": 2, + "name": "toolAgent", + "type": "AgentExecutor", + "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], + "category": "Agents", + "description": "Agent that uses Function Calling to pick the tools and args to call", "inputParams": [ { - "label": "API Documentation", - "name": "apiDocs", + "label": "System Message", + "name": "systemMessage", "type": "string", - "description": "Description of how API works. Please refer to more examples", + "default": "You are a helpful AI assistant.", + "description": "If Chat Prompt Template is provided, this will be ignored", "rows": 4, - "id": "getApiChain_0-input-apiDocs-string" - }, - { - "label": "Headers", - "name": "headers", - "type": "json", - "additionalParams": true, "optional": true, - "id": "getApiChain_0-input-headers-json" + "additionalParams": true, + "id": "toolAgent_0-input-systemMessage-string" }, { - "label": "URL Prompt", - "name": "urlPrompt", - "type": "string", - "description": "Prompt used to tell LLMs how to construct the URL. Must contains {api_docs} and {question}", - "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. 
Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:", - "rows": 4, + "label": "Max Iterations", + "name": "maxIterations", + "type": "number", + "optional": true, "additionalParams": true, - "id": "getApiChain_0-input-urlPrompt-string" - }, - { - "label": "Answer Prompt", - "name": "ansPrompt", - "type": "string", - "description": "Prompt used to tell LLMs how to return the API response. Must contains {api_response}, {api_url}, and {question}", - "default": "Given this {api_response} response for {api_url}. use the given response to answer this {question}", - "rows": 4, - "additionalParams": true, - "id": "getApiChain_0-input-ansPrompt-string" + "id": "toolAgent_0-input-maxIterations-number" } ], "inputAnchors": [ { - "label": "Language Model", + "label": "Tools", + "name": "tools", + "type": "Tool", + "list": true, + "id": "toolAgent_0-input-tools-Tool" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "toolAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Tool Calling Chat Model", "name": "model", - "type": "BaseLanguageModel", - "id": "getApiChain_0-input-model-BaseLanguageModel" - } - ], - "inputs": { - "model": "{{chatOpenAI_1.data.instance}}", - "apiDocs": "BASE URL: https://api.open-meteo.com/\n\nAPI Documentation\nThe API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:\n\nParameter\tFormat\tRequired\tDefault\tDescription\nlatitude, longitude\tFloating point\tYes\t\tGeographical WGS84 coordinate of the location\nhourly\tString array\tNo\t\tA list of weather variables which should be returned. 
Values can be comma separated, or multiple &hourly= parameter in the URL can be used.\ndaily\tString array\tNo\t\tA list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.\ncurrent_weather\tBool\tNo\tfalse\tInclude current weather conditions in the JSON output.\ntemperature_unit\tString\tNo\tcelsius\tIf fahrenheit is set, all temperature values are converted to Fahrenheit.\nwindspeed_unit\tString\tNo\tkmh\tOther wind speed speed units: ms, mph and kn\nprecipitation_unit\tString\tNo\tmm\tOther precipitation amount units: inch\ntimeformat\tString\tNo\tiso8601\tIf format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.\ntimezone\tString\tNo\tGMT\tIf timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.\npast_days\tInteger (0-2)\tNo\t0\tIf past_days is set, yesterday or the day before yesterday data are also returned.\nstart_date\nend_date\tString (yyyy-mm-dd)\tNo\t\tThe time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).\nmodels\tString array\tNo\tauto\tManually select one or more weather models. Per default, the best suitable weather models will be combined.\n\nHourly Parameter Definition\nThe parameter &hourly= accepts the following values. Most weather variables are given as an instantaneous value for the indicated hour. 
Some variables like precipitation are calculated from the preceding hour as an average or sum.\n\nVariable\tValid time\tUnit\tDescription\ntemperature_2m\tInstant\t°C (°F)\tAir temperature at 2 meters above ground\nsnowfall\tPreceding hour sum\tcm (inch)\tSnowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent\nrain\tPreceding hour sum\tmm (inch)\tRain from large scale weather systems of the preceding hour in millimeter\nshowers\tPreceding hour sum\tmm (inch)\tShowers from convective precipitation in millimeters from the preceding hour\nweathercode\tInstant\tWMO code\tWeather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.\nsnow_depth\tInstant\tmeters\tSnow depth on the ground\nfreezinglevel_height\tInstant\tmeters\tAltitude above sea level of the 0°C level\nvisibility\tInstant\tmeters\tViewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.", - "headers": "", - "urlPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:", - "ansPrompt": "Given this {api_response} response for {api_url}. 
use the given response to answer this {question}" - }, - "outputAnchors": [ - { - "id": "getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain", - "name": "getApiChain", - "label": "GETApiChain", - "type": "GETApiChain | BaseChain | BaseLangChain" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 1222.6923202234623, - "y": 359.97676456347756 - }, - "dragging": false - }, - { - "width": 300, - "height": 603, - "id": "chainTool_0", - "position": { - "x": 1600.1485877701232, - "y": 276.38970893436533 - }, - "type": "customNode", - "data": { - "id": "chainTool_0", - "label": "Chain Tool", - "version": 1, - "name": "chainTool", - "type": "ChainTool", - "baseClasses": ["ChainTool", "DynamicTool", "Tool", "StructuredTool", "BaseLangChain"], - "category": "Tools", - "description": "Use a chain as allowed tool for agent", - "inputParams": [ - { - "label": "Chain Name", - "name": "name", - "type": "string", - "placeholder": "state-of-union-qa", - "id": "chainTool_0-input-name-string" + "type": "BaseChatModel", + "description": "Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat", + "id": "toolAgent_0-input-model-BaseChatModel" }, { - "label": "Chain Description", - "name": "description", - "type": "string", - "rows": 3, - "placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.", - "id": "chainTool_0-input-description-string" - }, - { - "label": "Return Direct", - "name": "returnDirect", - "type": "boolean", + "label": "Chat Prompt Template", + "name": "chatPromptTemplate", + "type": "ChatPromptTemplate", + "description": "Override existing prompt with Chat Prompt Template. 
Human Message must includes {input} variable", "optional": true, - "id": "chainTool_0-input-returnDirect-boolean" - } - ], - "inputAnchors": [ + "id": "toolAgent_0-input-chatPromptTemplate-ChatPromptTemplate" + }, { - "label": "Base Chain", - "name": "baseChain", - "type": "BaseChain", - "id": "chainTool_0-input-baseChain-BaseChain" + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "toolAgent_0-input-inputModeration-Moderation" } ], "inputs": { - "name": "weather-qa", - "description": "useful for when you need to ask question about weather", - "returnDirect": false, - "baseChain": "{{getApiChain_0.data.instance}}" + "tools": ["{{openAPIToolkit_0.data.instance}}"], + "memory": "{{bufferMemory_0.data.instance}}", + "model": "{{chatAnthropic_0.data.instance}}", + "chatPromptTemplate": "", + "systemMessage": "You are an agent that can interact with the API to perform specific tasks based on user requests.\n\nYour main goal is to understand the user's needs, make appropriate API calls, and return the results in a clear format. Ensure you verify inputs before making API requests and handle errors gracefully if the API fails.\n\n# Steps\n\n1. **Receive User Input:** Listen carefully to the user's request and identify key parameters needed for the API call.\n2. **Validate Input:** Ensure that the user input is in the correct format and contains all necessary information.\n3. 
**Make API Call:** Use the provided OpenAPI tools to call appropriate API endpoint with the validated input.\n", + "inputModeration": "", + "maxIterations": "" }, "outputAnchors": [ { - "id": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain", - "name": "chainTool", - "label": "ChainTool", - "type": "ChainTool | DynamicTool | Tool | StructuredTool | BaseLangChain" + "id": "toolAgent_0-output-toolAgent-AgentExecutor|BaseChain|Runnable", + "name": "toolAgent", + "label": "AgentExecutor", + "description": "Agent that uses Function Calling to pick the tools and args to call", + "type": "AgentExecutor | BaseChain | Runnable" } ], "outputs": {}, "selected": false }, + "width": 300, + "height": 483, "selected": false, "positionAbsolute": { - "x": 1600.1485877701232, - "y": 276.38970893436533 + "x": 2142.702888476286, + "y": 52.064582962824204 }, "dragging": false }, { - "width": 300, - "height": 253, "id": "bufferMemory_0", "position": { - "x": 1642.0644080121785, - "y": 1715.6131926891728 + "x": 1017.5366991719394, + "y": 70.40237946649512 }, "type": "customNode", "data": { @@ -214,909 +155,282 @@ "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", "name": "bufferMemory", "label": "BufferMemory", + "description": "Retrieve chat messages stored in database", "type": "BufferMemory | BaseChatMemory | BaseMemory" } ], "outputs": {}, "selected": false }, + "width": 300, + "height": 250, "selected": false, "positionAbsolute": { - "x": 1642.0644080121785, - "y": 1715.6131926891728 + "x": 1017.5366991719394, + "y": 70.40237946649512 }, "dragging": false }, { - "width": 300, - "height": 603, - "id": "chainTool_1", + "id": "chatAnthropic_0", "position": { - "x": 1284.7746596034926, - "y": 895.1444797047182 + "x": 1782.2489802995697, + "y": -97.03292069533617 }, "type": "customNode", "data": { - "id": "chainTool_1", - "label": "Chain Tool", - "version": 1, - "name": "chainTool", - "type": "ChainTool", - 
"baseClasses": ["ChainTool", "DynamicTool", "Tool", "StructuredTool", "BaseLangChain"], - "category": "Tools", - "description": "Use a chain as allowed tool for agent", + "id": "chatAnthropic_0", + "label": "ChatAnthropic", + "version": 8, + "name": "chatAnthropic", + "type": "ChatAnthropic", + "baseClasses": ["ChatAnthropic", "ChatAnthropicMessages", "BaseChatModel", "BaseLanguageModel", "Runnable"], + "category": "Chat Models", + "description": "Wrapper around ChatAnthropic large language models that use the Chat endpoint", "inputParams": [ { - "label": "Chain Name", - "name": "name", - "type": "string", - "placeholder": "state-of-union-qa", - "id": "chainTool_1-input-name-string" + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["anthropicApi"], + "id": "chatAnthropic_0-input-credential-credential", + "display": true }, { - "label": "Chain Description", - "name": "description", - "type": "string", - "rows": 3, - "placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.", - "id": "chainTool_1-input-description-string" + "label": "Model Name", + "name": "modelName", + "type": "asyncOptions", + "loadMethod": "listModels", + "default": "claude-3-haiku", + "id": "chatAnthropic_0-input-modelName-asyncOptions", + "display": true }, { - "label": "Return Direct", - "name": "returnDirect", - "type": "boolean", + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, "optional": true, - "id": "chainTool_1-input-returnDirect-boolean" + "id": "chatAnthropic_0-input-temperature-number", + "display": true + }, + { + "label": "Streaming", + "name": "streaming", + "type": "boolean", + "default": true, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-streaming-boolean", + "display": true + }, + { + "label": "Max Tokens", + "name": "maxTokensToSample", + "type": "number", + 
"step": 1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-maxTokensToSample-number", + "display": true + }, + { + "label": "Top P", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-topP-number", + "display": true + }, + { + "label": "Top K", + "name": "topK", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-topK-number", + "display": true + }, + { + "label": "Extended Thinking", + "name": "extendedThinking", + "type": "boolean", + "description": "Enable extended thinking for reasoning model such as Claude Sonnet 3.7", + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-extendedThinking-boolean", + "display": true + }, + { + "label": "Budget Tokens", + "name": "budgetTokens", + "type": "number", + "step": 1, + "default": 1024, + "description": "Maximum number of tokens Claude is allowed use for its internal reasoning process", + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-budgetTokens-number", + "display": true + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Allow image input. 
Refer to the docs for more details.", + "default": false, + "optional": true, + "id": "chatAnthropic_0-input-allowImageUploads-boolean", + "display": true } ], "inputAnchors": [ { - "label": "Base Chain", - "name": "baseChain", - "type": "BaseChain", - "id": "chainTool_1-input-baseChain-BaseChain" + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatAnthropic_0-input-cache-BaseCache", + "display": true } ], "inputs": { - "name": "discord-bot", - "description": "useful for when you need to send message to Discord", - "returnDirect": "", - "baseChain": "{{postApiChain_0.data.instance}}" + "cache": "", + "modelName": "claude-3-5-haiku-latest", + "temperature": 0.9, + "streaming": true, + "maxTokensToSample": "", + "topP": "", + "topK": "", + "extendedThinking": "", + "budgetTokens": 1024, + "allowImageUploads": "" }, "outputAnchors": [ { - "id": "chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain", - "name": "chainTool", - "label": "ChainTool", - "type": "ChainTool | DynamicTool | Tool | StructuredTool | BaseLangChain" + "id": "chatAnthropic_0-output-chatAnthropic-ChatAnthropic|ChatAnthropicMessages|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatAnthropic", + "label": "ChatAnthropic", + "description": "Wrapper around ChatAnthropic large language models that use the Chat endpoint", + "type": "ChatAnthropic | ChatAnthropicMessages | BaseChatModel | BaseLanguageModel | Runnable" } ], "outputs": {}, "selected": false }, + "width": 300, + "height": 668, "selected": false, "positionAbsolute": { - "x": 1284.7746596034926, - "y": 895.1444797047182 + "x": 1782.2489802995697, + "y": -97.03292069533617 }, "dragging": false }, { - "width": 300, - "height": 460, - "id": "postApiChain_0", + "id": "openAPIToolkit_0", "position": { - "x": 933.3631140153886, - "y": 974.8756002461283 + "x": 1406.3474125716532, + "y": -26.543208700976493 }, "type": "customNode", "data": { - "id": "postApiChain_0", - 
"label": "POST API Chain", - "version": 1, - "name": "postApiChain", - "type": "POSTApiChain", - "baseClasses": ["POSTApiChain", "BaseChain", "BaseLangChain"], - "category": "Chains", - "description": "Chain to run queries against POST API", + "id": "openAPIToolkit_0", + "label": "OpenAPI Toolkit", + "version": 2, + "name": "openAPIToolkit", + "type": "OpenAPIToolkit", + "baseClasses": ["OpenAPIToolkit", "Tool"], + "category": "Tools", + "description": "Load OpenAPI specification, and converts each API endpoint to a tool", "inputParams": [ { - "label": "API Documentation", - "name": "apiDocs", - "type": "string", - "description": "Description of how API works. Please refer to more examples", - "rows": 4, - "id": "postApiChain_0-input-apiDocs-string" + "label": "YAML File", + "name": "yamlFile", + "type": "file", + "fileType": ".yaml", + "id": "openAPIToolkit_0-input-yamlFile-file", + "display": true + }, + { + "label": "Return Direct", + "name": "returnDirect", + "description": "Return the output of the tool directly to the user", + "type": "boolean", + "optional": true, + "id": "openAPIToolkit_0-input-returnDirect-boolean", + "display": true }, { "label": "Headers", "name": "headers", "type": "json", + "description": "Request headers to be sent with the API request. For example, {\"Authorization\": \"Bearer token\"}", "additionalParams": true, "optional": true, - "id": "postApiChain_0-input-headers-json" + "id": "openAPIToolkit_0-input-headers-json", + "display": true }, { - "label": "URL Prompt", - "name": "urlPrompt", - "type": "string", - "description": "Prompt used to tell LLMs how to construct the URL. 
Must contains {api_docs} and {question}", - "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string:", - "rows": 4, - "additionalParams": true, - "id": "postApiChain_0-input-urlPrompt-string" - }, - { - "label": "Answer Prompt", - "name": "ansPrompt", - "type": "string", - "description": "Prompt used to tell LLMs how to return the API response. Must contains {api_response}, {api_url}, and {question}", - "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. 
Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string: {api_url_body}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:", - "rows": 4, - "additionalParams": true, - "id": "postApiChain_0-input-ansPrompt-string" - } - ], - "inputAnchors": [ - { - "label": "Language Model", - "name": "model", - "type": "BaseLanguageModel", - "id": "postApiChain_0-input-model-BaseLanguageModel" - } - ], - "inputs": { - "model": "{{chatOpenAI_2.data.instance}}", - "apiDocs": "API documentation:\nEndpoint: https://some-discord-webhook.com\n\nThis API is for sending Discord message\n\nQuery body table:\nmessage | string | Message to send | required\n\nResponse schema (string):\nresult | string", - "headers": "", - "urlPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. 
Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string:", - "ansPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string: {api_url_body}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:" - }, - "outputAnchors": [ - { - "id": "postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain", - "name": "postApiChain", - "label": "POSTApiChain", - "type": "POSTApiChain | BaseChain | BaseLangChain" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 933.3631140153886, - "y": 974.8756002461283 - }, - "dragging": false - }, - { - "width": 300, - "height": 670, - "id": "chatOpenAI_2", - "position": { - "x": 572.8941615312035, - "y": 937.8425220917356 - }, - "type": "customNode", - "data": { - "id": "chatOpenAI_2", - "label": "ChatOpenAI", - "version": 6, - "name": "chatOpenAI", - "type": "ChatOpenAI", - "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], - "category": "Chat Models", - "description": "Wrapper around OpenAI large language models that use the Chat endpoint", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": 
["openAIApi"], - "id": "chatOpenAI_2-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "gpt-3.5-turbo", - "id": "chatOpenAI_2-input-modelName-options" - }, - { - "label": "Temperature", - "name": "temperature", - "type": "number", - "default": 0.9, - "optional": true, - "id": "chatOpenAI_2-input-temperature-number" - }, - { - "label": "Max Tokens", - "name": "maxTokens", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-maxTokens-number" - }, - { - "label": "Top Probability", - "name": "topP", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-topP-number" - }, - { - "label": "Frequency Penalty", - "name": "frequencyPenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-frequencyPenalty-number" - }, - { - "label": "Presence Penalty", - "name": "presencePenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-presencePenalty-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-basepath-string" - }, - { - "label": "BaseOptions", - "name": "baseOptions", - "type": "json", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-baseOptions-json" - }, - { - "label": "Allow Image Uploads", - "name": "allowImageUploads", + "label": "Remove null parameters", + "name": "removeNulls", "type": "boolean", - "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", - "default": false, "optional": true, - "id": "chatOpenAI_2-input-allowImageUploads-boolean" + "description": "Remove all keys with null values from the parsed arguments", + "id": "openAPIToolkit_0-input-removeNulls-boolean", + "display": true }, { - "label": "Image Resolution", - "description": "This parameter controls the resolution in which the model views the image.", - "name": "imageResolution", - "type": "options", - "options": [ - { - "label": "Low", - "name": "low" - }, - { - "label": "High", - "name": "high" - }, - { - "label": "Auto", - "name": "auto" - } - ], - "default": "low", - "optional": false, + "label": "Custom Code", + "name": "customCode", + "type": "code", + "hint": { + "label": "How to use", + "value": "- **Libraries:** \n You can use any libraries imported in Flowise.\n\n- **Tool Input Arguments:** \n Tool input arguments are available as the following variables:\n - `$PathParameters`\n - `$QueryParameters`\n - `$RequestBody`\n\n- **HTTP Requests:** \n By default, you can get the following values for making HTTP requests:\n - `$url`\n - `$options`\n\n- **Default Flow Config:** \n You can access the default flow configuration using these variables:\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n- **Custom Variables:** \n You can get custom variables using the syntax:\n - `$vars.